From 34f488f41ee820371159111bf621f11d0f54f669 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 4 Aug 2019 10:56:44 +0200
Subject: Adding upstream version 1.16.1.

Signed-off-by: Daniel Baumann
---
 .github/CODEOWNERS | 38 +-
 .gitignore | 4 +-
 .travis.yml | 1102 ++++++++++----------
 .travis/README.md | 6 +-
 .travis/check_changelog_last_modification.sh | 21 +
 .travis/generate_changelog_for_nightlies.sh | 1 +
 .travis/nightlies.sh | 8 +-
 .travis/package_management/build.sh | 31 +
 .travis/package_management/common.py | 116 +++
 .../configure_deb_lxc_environment.py | 62 +-
 .../configure_rpm_lxc_environment.py | 97 +-
 .travis/package_management/create_lxc_for_build.sh | 2 +-
 .travis/package_management/functions.sh | 4 +-
 .travis/package_management/prepare_packages.sh | 51 +-
 .../package_management/trigger_deb_lxc_build.py | 30 +-
 .travis/package_management/yank_stale_rpm.sh | 2 +-
 CHANGELOG.md | 122 ++-
 CMakeLists.txt | 1 +
 CONTRIBUTORS.md | 6 +-
 DOCUMENTATION.md | 53 +
 Makefile.am | 1 +
 README.md | 46 +-
 backends/README.md | 4 +-
 backends/opentsdb/README.md | 13 +-
 collectors/apps.plugin/apps_plugin.c | 281 ++++-
 collectors/cgroups.plugin/README.md | 2 +
 collectors/cgroups.plugin/cgroup-name.sh.in | 19 +-
 collectors/cgroups.plugin/cgroup-network.c | 2 +-
 collectors/cgroups.plugin/sys_fs_cgroup.c | 28 +-
 collectors/diskspace.plugin/README.md | 4 +-
 collectors/diskspace.plugin/plugin_diskspace.c | 8 +-
 collectors/freebsd.plugin/README.md | 2 +
 collectors/freebsd.plugin/freebsd_devstat.c | 25 +-
 collectors/freebsd.plugin/freebsd_getifaddrs.c | 25 +-
 collectors/freebsd.plugin/freebsd_getmntinfo.c | 8 +-
 collectors/freebsd.plugin/freebsd_sysctl.c | 160 +--
 collectors/macos.plugin/README.md | 2 +
 collectors/macos.plugin/macos_sysctl.c | 154 +--
 collectors/plugins.d/plugins_d.c | 242 ++++-
 collectors/plugins.d/plugins_d.h | 1 +
 collectors/proc.plugin/README.md | 2 +-
 collectors/proc.plugin/proc_diskstats.c | 59 +-
 collectors/proc.plugin/proc_meminfo.c | 18 +-
 collectors/proc.plugin/proc_net_dev.c | 25 +-
 collectors/proc.plugin/proc_net_netstat.c | 79 +-
 collectors/proc.plugin/proc_net_sctp_snmp.c | 35 +-
 collectors/proc.plugin/proc_net_snmp.c | 147 ++-
 collectors/proc.plugin/proc_net_snmp6.c | 247 +++--
 collectors/proc.plugin/proc_net_sockstat.c | 38 +-
 collectors/proc.plugin/proc_net_sockstat6.c | 20 +-
 collectors/proc.plugin/proc_net_stat_synproxy.c | 12 +-
 collectors/proc.plugin/proc_spl_kstat_zfs.c | 2 +
 collectors/proc.plugin/proc_vmstat.c | 8 +-
 .../proc.plugin/sys_devices_system_edac_mc.c | 6 +-
 collectors/proc.plugin/sys_devices_system_node.c | 3 +-
 collectors/proc.plugin/sys_fs_btrfs.c | 16 +-
 collectors/proc.plugin/sys_kernel_mm_ksm.c | 4 +-
 collectors/python.d.plugin/mongodb/mongodb.conf | 10 +-
 .../bases/FrameworkServices/SocketService.py | 4 +-
 .../python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 17 +-
 .../python.d.plugin/sensors/sensors.chart.py | 2 +-
 .../python.d.plugin/smartd_log/smartd_log.chart.py | 12 +-
 .../python.d.plugin/unbound/unbound.chart.py | 37 +-
 collectors/statsd.plugin/statsd.c | 2 +-
 collectors/tc.plugin/README.md | 2 +-
 collectors/tc.plugin/plugin_tc.c | 34 +-
 configs.signatures | 2 +-
 configure.ac | 72 +-
 contrib/debian/changelog | 5 +
 contrib/debian/control | 39 +-
 contrib/debian/control.jessie | 56 +
 contrib/debian/install_go.sh | 96 ++
 contrib/debian/netdata.postinst.in | 7 +-
 contrib/debian/rules | 22 +-
 daemon/anonymous-statistics.sh.in | 18 -
 daemon/common.h | 1 +
 daemon/config/README.md | 3 +-
 daemon/main.c | 5 +-
 database/engine/journalfile.c | 12 +-
 database/engine/rrdengine.c | 44 +-
 database/rrddim.c | 4 +-
 database/rrdvar.c | 3 +-
 diagrams/netdata-overview.xml | 2 +-
 docs/Demo-Sites.md | 1 -
 docs/README.md | 7 +
 docs/Running-behind-apache.md | 45 +-
 docs/Running-behind-haproxy.md | 280 +++
 docs/Running-behind-nginx.md | 87 +-
 docs/configuration-guide.md | 2 +-
 docs/generator/buildhtml.sh | 16 +-
 docs/generator/buildyaml.sh | 22 +-
 docs/generator/checklinks.sh | 32 +-
 docs/generator/custom/css/netdata.css | 68 ++
 docs/netdata-security.md | 2 +-
 docs/what-is-netdata.md | 385 +++++++
 health/README.md | 43 +-
 health/health.d/dbengine.conf | 2 +-
 health/notifications/alarm-notify.sh.in | 48 +-
 health/notifications/health_alarm_notify.conf | 5 +
 libnetdata/json/json.c | 4 +-
 libnetdata/libnetdata.h | 2 +
 libnetdata/locks/locks.c | 33 +-
 libnetdata/popen/popen.c | 161 ++-
 libnetdata/socket/security.c | 77 +-
 libnetdata/socket/security.h | 5 +-
 libnetdata/socket/socket.c | 66 +-
 libnetdata/socket/socket.h | 8 +-
 libnetdata/string/utf8.h | 9 +
 libnetdata/url/url.c | 334 +++++-
 libnetdata/url/url.h | 7 +
 netdata-installer.sh | 38 +-
 netdata.spec.in | 21 +
 packaging/DISTRIBUTIONS.md | 37 +
 packaging/docker/Dockerfile | 8 +-
 packaging/docker/README.md | 69 +-
 packaging/docker/build-test.sh | 20 +-
 packaging/docker/publish.sh | 4 -
 packaging/docker/run.sh | 40 +-
 packaging/installer/README.md | 138 ++-
 packaging/installer/UNINSTALL.md | 2 +-
 packaging/installer/netdata-uninstaller.sh | 5 +-
 packaging/installer/netdata-updater.sh | 68 +-
 packaging/makeself/jobs/70-netdata-git.install.sh | 4 +
 packaging/version | 2 +-
 streaming/README.md | 73 +-
 streaming/rrdpush.c | 26 +-
 streaming/stream.conf | 16 +
 tests/Makefile.am | 9 +
 tests/acls/acl.sh.in | 119 +++
 tests/acls/netdata.cfg | 20 +
 tests/acls/netdata.ssl.cfg | 24 +
 tests/urls/request.sh.in | 303 ++++++
 web/api/badges/web_buffer_svg.c | 216 ++--
 web/api/health/README.md | 49 +-
 web/api/health/health_cmdapi.c | 1 +
 web/api/web_api_v1.c | 14 +-
 web/gui/console.html | 2 +-
 web/gui/dashboard.js | 8 +-
 web/gui/dashboard_info.js | 2 +-
 web/gui/demosites.html | 37 +-
 web/gui/images/packaging-beta-tag.svg | 42 +
 web/gui/index.html | 59 +-
 web/gui/infographic.html | 8 +-
 web/gui/main.css | 344 +++---
 web/gui/main.js | 11 +-
 web/gui/src/dashboard.js/main.js | 8 +-
 web/server/README.md | 60 +-
 web/server/web_client.c | 315 ++++--
 web/server/web_client.h | 32 +
 149 files changed, 5761 insertions(+), 2260 deletions(-)
 create mode 100755 .travis/check_changelog_last_modification.sh
 create mode 100644 .travis/package_management/build.sh
 create mode 100644 DOCUMENTATION.md
 create mode 100644 contrib/debian/changelog
 create mode 100644 contrib/debian/control.jessie
 create mode 100755 contrib/debian/install_go.sh
 create mode 100644 docs/README.md
 create mode 100644 docs/Running-behind-haproxy.md
 create mode 100644 docs/what-is-netdata.md
 create mode 100644 libnetdata/string/utf8.h
 create mode 100644 packaging/DISTRIBUTIONS.md
 create mode 100644 tests/acls/acl.sh.in
 create mode 100644 tests/acls/netdata.cfg
 create mode 100644 tests/acls/netdata.ssl.cfg
 create mode 100644 tests/urls/request.sh.in
 create mode 100644 web/gui/images/packaging-beta-tag.svg

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7d5dfa8b4..2b4e0d164 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -13,33 +13,33 @@ backends/json/ @thiagoftsm @vlvkobal
 backends/opentsdb/ @thiagoftsm @vlvkobal
 backends/prometheus/ @vlvkobal @paulkatsoulakis @thiagoftsm
 build/ @paulkatsoulakis @cakrit
-collectors/ @vlvkobal @cakrit
+collectors/ @vlvkobal @mfundul @cakrit
 collectors/charts.d.plugin/ @paulkatsoulakis @cakrit
-collectors/freebsd.plugin/ @vlvkobal @cakrit
-collectors/macos.plugin/ @vlvkobal @cakrit
-collectors/node.d.plugin/ @gmosx @cakrit
+collectors/freebsd.plugin/ @vlvkobal @thiagoftsm @cakrit
+collectors/macos.plugin/ @vlvkobal @thiagoftsm @cakrit
+collectors/node.d.plugin/ @VLegakis @cakrit
 collectors/node.d.plugin/fronius/ @ccremer @cakrit
-collectors/node.d.plugin/snmp/ @gmosx @cakrit
+collectors/node.d.plugin/snmp/ @VLegakis @cakrit
 collectors/node.d.plugin/stiebeleltron/ @ccremer @cakrit
 collectors/python.d.plugin/ @ilyam8
-collectors/cups.plugin/ @simonnagl @vlvkobal @cakrit
+collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm @cakrit
 daemon/ @thiagoftsm @mfundul @cakrit
-database/ @cakrit @mfundul
-docs/ @cakrit
-health/ @thiagoftsm @cakrit
-health/health.d/ @thiagoftsm @cakrit
-health/notifications/ @Ferroin @cakrit
-libnetdata/ @thiagofsm @cakrit
+database/ @mfundul @thiagoftsm @cakrit
+docs/ @cakrit @joelhans
+health/ @thiagoftsm @vlvkobal @cakrit
+health/health.d/ @thiagoftsm @vlvkobal @cakrit
+health/notifications/ @Ferroin @thiagoftsm @cakrit
+libnetdata/ @thiagofsm @mfundul @cakrit
 packaging/ @paulkatsoulakis @cakrit
 packaging/installer/ @paulkatsoulakis @cakrit
 packaging/makeself/ @paulkatsoulakis @cakrit
-registry/ @gmosx @cakrit
-streaming/ @cakrit @thiagoftsm
-web/ @thiagoftsm @cakrit
-web/gui/ @gmosx @cakrit
+registry/ @VLegakis @cakrit
+streaming/ @thiagoftsm @vlvkobal @cakrit
+web/ @thiagoftsm @mfundul @vlvkobal @cakrit
+web/gui/ @VLegakis @cakrit
 
 # Ownership by filetype (overwrites ownership by directory)
-*.md @cakrit
+*.md @cakrit @joelhans
 *.am @paulkatsoulakis
 
 # Ownership of specific files
@@ -54,8 +54,8 @@ web/gui/ @gmosx @cakrit
 netdata.spec.in @paulkatsoulakis
 netdata-installer.sh @paulkatsoulakis @cakrit
 netlify.toml @cakrit
-package.json @gmosx
+package.json @VLegakis
 packaging/version @netdatabot
-LICENSE.md @cakrit
+LICENSE.md @cakrit @joelhans
 CHANGELOG.md @netdatabot

diff --git a/.gitignore b/.gitignore
index 789043e74..82000619b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -139,8 +139,6 @@ cmake_install.cmake
 .DS_Store
 webcopylocal*
 
-contrib/debian/changelog
-
 # converted diagrams
 diagrams/*.png
 diagrams/*.svg
@@ -168,6 +166,8 @@ callgrind.out.*
 gmon.out
 gmon.txt
 sitespeed-result/
+tests/acls/acl.sh
+tests/urls/request.sh
 
 # tests and temp files
 python.d/python-modules-installer.sh

diff --git a/.travis.yml b/.travis.yml
index cb9d72901..a239a0c64 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@ dist: xenial
 sudo: true
 language: c
 services:
-- docker
+  - docker
 
 
 
@@ -16,18 +16,20 @@ matrix:
 # Install dependencies for all, once
 #
 install:
-- sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python-pip
-- sudo apt install -y --only-upgrade docker-ce
-- sudo pip install git-semver
-- docker info
-- source tests/installer/slack.sh
-- export NOTIF_CHANNEL="automation-beta"
-- if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi;
-- export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)"
-- if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi;
-- export DEPLOY_REPO="netdata" # Default production packaging repository
-- if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi;
-- export PACKAGING_USER="$(echo ${TRAVIS_REPO_SLUG} | cut -d'/' -f1)"
+  - sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python-pip
+  - sudo apt install -y --only-upgrade docker-ce
+  - sudo pip install git-semver
+  - docker info
+  - source tests/installer/slack.sh
+  - export NOTIF_CHANNEL="automation-beta"
+  - if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi;
+  - export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)"
+  - export LATEST_RELEASE_VERSION="$(cat packaging/version | cut -d'-' -f1)"
+  - export LATEST_RELEASE_DATE="$(git log -1 --format=%aD "${LATEST_RELEASE_VERSION}" | cat)"
+  - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi;
+  - export DEPLOY_REPO="netdata" # Default production packaging repository
+  - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi;
+  - export PACKAGING_USER="$(echo ${TRAVIS_REPO_SLUG} | cut -d'/' -f1)"
 
 
@@ -41,167 +43,148 @@ notifications:
 # Define the stage sequence and conditionals
 #
 stages:
-# Mandatory runs, we always want these executed
-- name: Code quality, linting, syntax, code style
-- name: Build process
-- name: Artifacts validation
-- name: Artifacts validation on bare OS, stable to current lifecycle checks
-  if: branch = master AND (type = pull_request OR type = cron)
-
-  # Nightly operations
-- name: Nightly operations
-  if: branch = master AND type = cron
-- name: Nightly release
-  if: branch = master AND type = cron
-
-  # Scheduled releases
-- name: Packaging for release
-  if: branch = master AND type != pull_request AND type != cron
-
-- name: Publish for release
-  if: branch = master AND type != pull_request AND type != cron AND commit_message =~ /(\[netdata release candidate\]|\[netdata major release\]|\[netdata minor release\]|\[netdata patch release\])/
-
-  # Build DEB packages under special conditions
-  # Ubuntu
-- name: "Package ubuntu/disco"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/
-- name: "Package ubuntu/cosmic"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/
-- name: "Package ubuntu/bionic"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/
-- name: "Package ubuntu/artful"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/
-
-  # Debian
-- name: "Package debian/buster"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/
-- name: "Package debian/stretch"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/
-- name: "Package debian/jessie"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/
-- name: "Package debian/wheezy"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/
-
-  # Build RPM packages under special conditions
-  # Enterprise linux (Covers CentOS, Redhat, Amazon linux)
-- name: "Package Enterprise Linux 7"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Enterprise Linux\]|\[Package arm64 RPM\]|\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package AMD64 RPM Enterprise Linux\]|\[Package AMD64 RPM\])/
-- name: "Package Enterprise linux 6"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package AMD64 RPM Enterprise Linux\]|\[Package AMD64 RPM\])/
-
-  # Fedora
-- name: "Package Fedora 30"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/
-- name: "Package Fedora 29"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/
-- name: "Package Fedora 28"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/
-
-  # OpenSuSE
-- name: "Package OpenSuSE 15.1"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package AMD64 RPM openSuSE\]|\[Package AMD64 RPM\])/
-- name: "Package OpenSuSE 15.0"
-  if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package AMD64 RPM openSuSE\]|\[Package AMD64 RPM\])/
-
-
-
-# DEB and RPM template flows
-- stage: &_RPM_TEMPLATE
-  name: "Build & Publish RPM package"
-  before_install:
-  - sudo apt-get install -y wget lxc lxc-templates
-  - source tests/installer/slack.sh
-  before_script:
-  - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
-  - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
-  script:
-  - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
-  - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
-  - sudo chmod -R 755 "/var/lib/lxc"
-  - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
-  after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}"
-
-  before_deploy:
-  - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found"
-  deploy:
-    # Beta packages deployment
-    - provider: packagecloud
-      repository: "${DEPLOY_REPO}"
-      username: "${PACKAGING_USER}"
-      token: "${PKG_CLOUD_TOKEN}"
-      dist: "${BUILD_STRING}"
-      local_dir: "${PACKAGES_DIRECTORY}"
-      skip_cleanup: true
-      on:
-        # Only deploy on ${USER}/netdata, master branch, when packages directory is created
-        repo: ${TRAVIS_REPO_SLUG}
-        branch: master
-        condition: -d "${PACKAGES_DIRECTORY}"
-    # Production release packages deployment
-    - provider: packagecloud
-      repository: "netdata"
-      username: "netdata"
-      token: "${PKG_CLOUD_TOKEN}"
-      dist: "${BUILD_STRING}"
-      local_dir: "${PACKAGES_DIRECTORY}"
-      skip_cleanup: true
-      on:
-        # Only deploy on ${USER}/netdata, master branch, when packages directory is created
-        repo: "netdata/netdata"
-        branch: "master"
-        condition: -d "${PACKAGES_DIRECTORY}"
-  after_deploy:
-  - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
-  - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
-
-
-  # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging
-- stage: &_DEB_TEMPLATE
-  name: "Build & Publish DEB package"
-  before_install:
-  - sudo apt-get install -y wget lxc lxc-templates
-  - source tests/installer/slack.sh
-  before_script:
-  - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
-  - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
-  script:
-  - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
-  - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
-  - sudo chmod -R 755 "/var/lib/lxc"
-  - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
-  after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}"
-  before_deploy:
-  - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found"
-  deploy:
-    # Beta packages deployment
-    - provider: packagecloud
-      repository: "${DEPLOY_REPO}"
-      username: "${PACKAGING_USER}"
-      token: "${PKG_CLOUD_TOKEN}"
-      dist: "${BUILD_STRING}"
-      local_dir: "${PACKAGES_DIRECTORY}"
-      skip_cleanup: true
-      on:
-        # Only deploy on ${USER}/netdata, master branch, when build-area directory is created
-        repo: ${TRAVIS_REPO_SLUG}
-        branch: master
-        condition: -d "${PACKAGES_DIRECTORY}"
-    # Production release packages deployment
-    - provider: packagecloud
-      repository: "netdata"
-      username: "netdata"
-      token: "${PKG_CLOUD_TOKEN}"
-      dist: "${BUILD_STRING}"
-      local_dir: "${PACKAGES_DIRECTORY}"
-      skip_cleanup: true
-      on:
-        # Only deploy on ${USER}/netdata, master branch, when build-area directory is created
-        repo: "netdata/netdata"
-        branch: master
-        condition: -d "${PACKAGES_DIRECTORY}"
-  after_deploy:
-  - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
-  - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
+  # Mandatory runs, we always want these executed
+  - name: Code quality, linting, syntax, code style
+  - name: Build process
+  - name: Artifacts validation
+  - name: Artifacts validation on bare OS, stable to current lifecycle checks
+    if: branch = master AND (type = pull_request OR type = cron)
+
+  # Nightly operations
+  - name: Nightly operations
+    if: branch = master AND type = cron
+  - name: Nightly release
+    if: branch = master AND type = cron
+
+  # Scheduled releases
+  - name: Packaging for release
+    if: branch = master AND type != pull_request AND type != cron
+
+  - name: Publish for release
+    if: branch = master AND type != pull_request AND type != cron AND commit_message =~ /(\[netdata release candidate\]|\[netdata major release\]|\[netdata minor release\]|\[netdata patch release\])/
+
+  # Build DEB packages under special conditions
+  # Ubuntu
+  - name: "Package ubuntu/disco"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
+  - name: "Package ubuntu/cosmic"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
+  - name: "Package ubuntu/bionic"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
+
+  # Debian
+  - name: "Package debian/buster"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
+  - name: "Package debian/stretch"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
+  - name: "Package debian/jessie"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
+
+  # Build RPM packages under special conditions
+  # Enterprise linux (Covers CentOS, Redhat, Amazon linux)
+  - name: "Package Enterprise Linux 7"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Enterprise Linux\]|\[Package arm64 RPM\]|\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/
+  - name: "Package Enterprise linux 6"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/
+
+  # Fedora
+  - name: "Package Fedora 30"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
+  - name: "Package Fedora 29"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
+  - name: "Package Fedora 28"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
+
+  # OpenSuSE
+  - name: "Package OpenSuSE 15.1"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/
+  - name: "Package OpenSuSE 15.0"
+    if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/
+
+
+
+  # DEB and RPM template flows
+  - stage: &_RPM_TEMPLATE
+    name: "Build & Publish RPM package"
+    before_install:
+      - sudo apt-get install -y wget lxc lxc-templates
+      - source tests/installer/slack.sh
+    before_script:
+      - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
+      - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+    script:
+      - echo "GIT Branch:" && git branch
+      - echo "Last commit:" && git log -1
+      - echo "GIT Describe:" && git describe
+      - echo "packaging/version:" && cat packaging/version
+      - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+      - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+      - sudo chmod -R 755 "/var/lib/lxc"
+      - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
+    git:
+      depth: false
+    after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}"
+    before_deploy:
+      - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found"
+    deploy:
+      - provider: packagecloud
+        repository: "${DEPLOY_REPO}"
+        username: "${PACKAGING_USER}"
+        token: "${PKG_CLOUD_TOKEN}"
+        dist: "${BUILD_STRING}"
+        local_dir: "${PACKAGES_DIRECTORY}"
+        skip_cleanup: true
+        on:
+          # Only deploy on ${USER}/netdata, master branch, when packages directory is created
+          repo: ${TRAVIS_REPO_SLUG}
+          branch: "master"
+          condition: -d "${PACKAGES_DIRECTORY}"
+    after_deploy:
+      - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
+      - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
+
+
+
+  # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging
+  - stage: &_DEB_TEMPLATE
+    name: "Build & Publish DEB package"
+    before_install:
+      - sudo apt-get install -y wget lxc lxc-templates dh-make git-buildpackage build-essential libdistro-info-perl
+      - source tests/installer/slack.sh
+    before_script:
+      - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
+      - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+    script:
+      - echo "GIT Branch:" && git branch
+      - echo "Last commit:" && git log -1
+      - echo "GIT Describe:" && git describe
+      - echo "packaging/version:" && cat packaging/version
+      - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+      - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+      - sudo chown -R root:travis "/var/lib/lxc"
+      - sudo chmod -R 750 "/var/lib/lxc"
+      - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
upload" && sudo -E .travis/package_management/prepare_packages.sh + git: + depth: false + after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}" + before_deploy: + - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found" + deploy: + - provider: packagecloud + repository: "${DEPLOY_REPO}" + username: "${PACKAGING_USER}" + token: "${PKG_CLOUD_TOKEN}" + dist: "${BUILD_STRING}" + local_dir: "${PACKAGES_DIRECTORY}" + skip_cleanup: true + on: + # Only deploy on ${USER}/netdata, master branch, when build-area directory is created + repo: ${TRAVIS_REPO_SLUG} + branch: "master" + condition: -d "${PACKAGES_DIRECTORY}" + after_deploy: + - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi; + - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi; @@ -209,464 +192,447 @@ stages: # jobs: include: - # Do code quality, syntax checking and other pre-build activities - - stage: Code quality, linting, syntax, code style + # Do code quality, syntax checking and other pre-build activities + - stage: Code quality, linting, syntax, code style - name: Run shellchecking on BASH - script: shellcheck --format=gcc $(find . -name '*.sh.in' -not -iwholename '*.git*') + name: Run shellchecking on BASH + script: shellcheck --format=gcc $(find . -name '*.sh.in' -not -iwholename '*.git*') - # This falls under same stage defined earlier - - name: Run checksum checks on kickstart files - script: ./tests/installer/checksums.sh - env: LOCAL_ONLY="true" + # This falls under same stage defined earlier + - name: Run checksum checks on kickstart files + script: ./tests/installer/checksums.sh + env: LOCAL_ONLY="true" - # This falls under same stage defined earlier - - name: Web Dashboard pre-generated file consistency checks (dashboard.js) - script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js + # This falls under same stage defined earlier + - name: Web Dashboard pre-generated file consistency checks (dashboard.js) + script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js - # Ensure netdata code builds successfully - - stage: Build process + # Ensure netdata code builds successfully + - stage: Build process - name: Standard netdata build - script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto - env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' - after_failure: post_message "TRAVIS_MESSAGE" " standard netdata build is failing (Still dont know which one, will improve soon)" + name: Standard netdata build + script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto + env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' + after_failure: post_message "TRAVIS_MESSAGE" " standard netdata build is failing (Still dont know which one, will improve soon)" - - name: Docker container build process (alpine installation) - script: packaging/docker/build.sh - env: DEVEL="true" - after_failure: post_message "TRAVIS_MESSAGE" "Docker build process failed" + - name: Docker container build process (alpine installation) + script: packaging/docker/build.sh + env: 
DEVEL="true" + after_failure: post_message "TRAVIS_MESSAGE" "Docker build process failed" - - name: Run 'make dist' validation - before_script: mkdir /tmp/netdata-makedist-test - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make clean || echo "Nothing to clean" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean || echo "Nothing to distclean" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" /bin/bash -c "autoreconf -ivf && ./configure --prefix=/netdata_install/usr --sysconfdir=/netdata_install/etc --localstatedir=/netdata_install/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make dist - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" ls -ltr ./netdata-$(git describe).tar.gz || ls -ltr ./netdata-$(cat packaging/version | tr -d '\n').tar.gz - - .travis/run_install_with_dist_file.sh - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean - git: - depth: false - after_script: rm -rf /tmp/netdata-makedist-test - after_failure: post_message "TRAVIS_MESSAGE" "'make dist' failed" + - name: Run 'make dist' validation + before_script: mkdir /tmp/netdata-makedist-test + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make clean || echo "Nothing to clean" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean || echo "Nothing to distclean" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" /bin/bash -c "autoreconf -ivf && ./configure --prefix=/netdata_install/usr --sysconfdir=/netdata_install/etc --localstatedir=/netdata_install/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make dist + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" ls -ltr ./netdata-$(git describe).tar.gz || ls -ltr ./netdata-$(cat packaging/version | tr -d '\n').tar.gz + - .travis/run_install_with_dist_file.sh + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean + git: + depth: false + after_script: rm -rf /tmp/netdata-makedist-test + after_failure: post_message "TRAVIS_MESSAGE" "'make dist' failed" - - stage: Artifacts validation + - stage: Artifacts validation - name: Unit Testing - script: - 
-    - fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto
-    - $HOME/netdata/usr/sbin/netdata -W unittest
-    env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1'
-    after_failure: post_message "TRAVIS_MESSAGE" "Unit testing failed"
-
-  - name: Build/install on ubuntu 14.04 (not containerized)
-    script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
-    after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 14.04"
+    - stage: Artifacts validation
+
+      name: Unit Testing
+      script:
+        - fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto
+        - $HOME/netdata/usr/sbin/netdata -W unittest
+      env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1'
+      after_failure: post_message "TRAVIS_MESSAGE" "Unit testing failed"
+
+    - name: Build/install on ubuntu 14.04 (not containerized)
+      script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
+      after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 14.04"
 
-  - name: Build/Install for ubuntu 18.04 (not containerized)
-    script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
-    after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 18.04"
+    - name: Build/Install for ubuntu 18.04 (not containerized)
+      script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
+      after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 18.04"
 
-  - name: Run netdata lifecycle, on ubuntu 18.04 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata lifecycle test script failed on ubuntu 18.04"
+    - name: Run netdata lifecycle, on ubuntu 18.04 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata lifecycle test script failed on ubuntu 18.04"
 
-  - name: Run netdata lifecycle from stable to current, on CentOS 7 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on CentOS 7"
-
-  - name: Build/install for CentOS 6 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos6" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
-    after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 6"
+    - name: Run netdata lifecycle from stable to current, on CentOS 7 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on CentOS 7"
+
+    - name: Build/install for CentOS 6 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos6" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
+      after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 6"
 
-  - name: Build/install for CentOS 7 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
-    after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7"
+    - name: Build/install for CentOS 7 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
+      after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7"
 
-  - stage: "Artifacts validation on bare OS, stable to current lifecycle checks"
-
-    # Ubuntu runs
-    name: Run netdata lifecycle on Ubuntu 16.04 (xenial)
-    script: sudo -E tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 16.04"
+    - stage: "Artifacts validation on bare OS, stable to current lifecycle checks"
+
+      # Ubuntu runs
+      name: Run netdata lifecycle on Ubuntu 16.04 (xenial)
+      script: sudo -E tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 16.04"
 
-  - name: Run netdata lifecycle, on Ubuntu 19.04 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "ubuntu:19.04" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 19.04"
+    - name: Run netdata lifecycle, on Ubuntu 19.04 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "ubuntu:19.04" tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 19.04"
 
-    # Centos runs
-  - name: Run netdata lifecycle on CentOS 7 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "centos:7" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare CentOS 7"
+      # Centos runs
+    - name: Run netdata lifecycle on CentOS 7 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "centos:7" tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare CentOS 7"
 
-    # Debian runs
-  - name: Run netdata lifecycle, on Debian 9 (Containerized)
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "debian:stretch" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Debian 9 (stretch)"
+      # Debian runs
+    - name: Run netdata lifecycle, on Debian 9 (Containerized)
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "debian:stretch" tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Debian 9 (stretch)"
 
-    # openSuSE runs
-  - name: Run netdata lifecycle, on openSuSE 15.0
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.0" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.0"
+      # openSuSE runs
+    - name: Run netdata lifecycle, on openSuSE 15.0
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.0" tests/updater_checks.sh
+      after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.0"
 
-  - name: Run netdata lifecycle, on openSuSE 15.1
-    script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.1" tests/updater_checks.sh
-    after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.1"
+    - name: Run netdata lifecycle, on openSuSE 15.1
+      script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.1" tests/updater_checks.sh
/netdata "opensuse/leap:15.1" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.1" - - name: Run netdata lifecycle, on openSuSE Tumbleweed - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/tumbleweed:latest" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/tumbleweed:latest" + - name: Run netdata lifecycle, on openSuSE Tumbleweed + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/tumbleweed:latest" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/tumbleweed:latest" - # Alpine runs - - name: Run netdata lifecycle, on Alpine linux - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "alpine" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Alpine" + # Alpine runs + - name: Run netdata lifecycle, on Alpine linux + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "alpine" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Alpine" - # Arch linux runs - - name: Run netdata lifecycle, on ArchLinux - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "archlinux/base:latest" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare archlinux/base:latest" + # Arch linux runs + - name: Run netdata lifecycle, on ArchLinux + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "archlinux/base:latest" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare archlinux/base:latest" - # Fedora runs - - name: Run netdata lifecycle, on Fedora 28 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:28" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 28" + # Fedora runs + - name: Run netdata lifecycle, on Fedora 28 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:28" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 28" - - name: Run netdata lifecycle, on Fedora 29 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:29" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 29" + - name: Run netdata lifecycle, on Fedora 29 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:29" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 29" - - name: Run netdata lifecycle, on Fedora 30 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:30" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 30" + - name: Run netdata lifecycle, on Fedora 30 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:30" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 30" - - stage: Packaging for release + - stage: Packaging for release - name: Generate changelog and TAG the release (only on special commit msg) - before_script: post_message "TRAVIS_MESSAGE" "Packaging step for release initiated" 
"${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - .travis/generate_changelog_and_tag_release.sh - after_failure: post_message "TRAVIS_MESSAGE" " Packaging for release failed" - git: - depth: false + name: Generate changelog and TAG the release (only on special commit msg) + before_script: post_message "TRAVIS_MESSAGE" "Packaging step for release initiated" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - .travis/generate_changelog_and_tag_release.sh + after_failure: post_message "TRAVIS_MESSAGE" " Packaging for release failed" + git: + depth: false - - name: Run labeler on github issues - script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA + - name: Run labeler on github issues + script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA - # ###### Packaging workflow section ###### - # References: - # https://us.images.linuxcontainers.org - # https://packagecloud.io/docs#install_repo + # ###### Packaging workflow section ###### + # References: + # https://us.images.linuxcontainers.org + # https://packagecloud.io/docs#install_repo - # Ubuntu distros build - # - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/disco" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="disco" BUILD_STRING="ubuntu/disco" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + # Ubuntu distros build + # + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/disco" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="disco" BUILD_STRING="ubuntu/disco" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/cosmic" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="cosmic" BUILD_STRING="ubuntu/cosmic" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/cosmic" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="cosmic" BUILD_STRING="ubuntu/cosmic" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/bionic" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/bionic" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/artful" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="artful" BUILD_STRING="ubuntu/artful" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + # Debian distros build + - stage: + <<: *_DEB_TEMPLATE + stage: "Package debian/buster" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - # Debian distros build - - stage: - <<: 
-      stage: "Package debian/buster"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster"
-      - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
-      - ALLOW_SOFT_FAILURE_HERE=true
+    - stage:
+        <<: *_DEB_TEMPLATE
+        stage: "Package debian/stretch"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch"
+          - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+          - ALLOW_SOFT_FAILURE_HERE=true
 
-  - stage:
-      <<: *_DEB_TEMPLATE
-      stage: "Package debian/stretch"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch"
-      - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
-      - ALLOW_SOFT_FAILURE_HERE=true
+    - stage:
+        <<: *_DEB_TEMPLATE
+        stage: "Package debian/jessie"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="jessie" BUILD_STRING="debian/jessie"
+          - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+          - ALLOW_SOFT_FAILURE_HERE=true
 
-  - stage:
-      <<: *_DEB_TEMPLATE
-      stage: "Package debian/jessie"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="jessie" BUILD_STRING="debian/jessie"
-      - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
-      - ALLOW_SOFT_FAILURE_HERE=true
+    # Enterprise linux builds (Centos, Redhat, Amazon linux (el/6))
+    #
+    - stage:
+        <<: *_RPM_TEMPLATE
+        stage: "Package Enterprise Linux 7"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7"
+          - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
+          - ALLOW_SOFT_FAILURE_HERE=true
 
-  - stage:
-      <<: *_DEB_TEMPLATE
-      stage: "Package debian/wheezy"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="wheezy" BUILD_STRING="debian/wheezy"
-      - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
-      - ALLOW_SOFT_FAILURE_HERE=true
+    - stage:
+        <<: *_RPM_TEMPLATE
+        stage: "Package Enterprise linux 6"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="6" BUILD_STRING="el/6"
+          - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
+          - ALLOW_SOFT_FAILURE_HERE=true
+
+    # Fedora distros build
+    #
+    - stage:
+        <<: *_RPM_TEMPLATE
+        stage: "Package Fedora 30"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="30" BUILD_STRING="fedora/30"
+          - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
+          - ALLOW_SOFT_FAILURE_HERE=true
 
-  # Enterprise linux builds (Centos, Redhat, Amazon linux (el/6))
-  #
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package Enterprise Linux 7"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
-      - ALLOW_SOFT_FAILURE_HERE=true
+
+
+    - stage:
+        <<: *_RPM_TEMPLATE
+        stage: "Package Fedora 29"
+        env:
+          - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="29" BUILD_STRING="fedora/29"
+          - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
+          - ALLOW_SOFT_FAILURE_HERE=true
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package Enterprise linux 6"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="6" BUILD_STRING="el/6"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
-      - ALLOW_SOFT_FAILURE_HERE=true
-
-  # Fedora distros build
-  #
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package Fedora 30"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="30" BUILD_STRING="fedora/30"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
-      - ALLOW_SOFT_FAILURE_HERE=true
-
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package Fedora 29"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="29" BUILD_STRING="fedora/29"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
-      - ALLOW_SOFT_FAILURE_HERE=true
-
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package Fedora 28"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="28" BUILD_STRING="fedora/28"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
-      - ALLOW_SOFT_FAILURE_HERE=true
-
-  # Opensuse distros build
-  #
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package OpenSuSE 15.1"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.1"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="zypper"
-      - ALLOW_SOFT_FAILURE_HERE=true
-
-  - stage:
-      <<: *_RPM_TEMPLATE
-      stage: "Package OpenSuSE 15.0"
-      env:
-      - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.0"
-      - PACKAGE_TYPE="rpm" REPO_TOOL="zypper"
-      - ALLOW_SOFT_FAILURE_HERE=true
-  # ###### End of packaging workflow section ###### #
-  # ############################################### #
-
-
-
-  # We only publish if a TAG has been set during packaging
-  - stage: Publish for release
-
-    name: Build & Publish docker images
-    before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images" "${NOTIF_CHANNEL}"
-    script:
-    - echo "GIT Branch:" && git branch
-    - echo "Last commit:" && git log -1
-    - echo "GIT Describe:" && git describe
-    - echo "packaging/version:" && cat packaging/version
-    - packaging/docker/check_login.sh
-    - packaging/docker/build.sh
-    - packaging/docker/publish.sh
-    after_failure: post_message "TRAVIS_MESSAGE" " Docker image publishing failed"
-    git:
-      depth: false
-    env: ALLOW_SOFT_FAILURE_HERE=true
-    # We don't run on release candidates
-    if: tag !~ /(-rc)/
-
-  - name: Create release draft
-    before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}"
-    script:
-    - echo "GIT Branch:" && git branch
-    - echo "Last commit:" && git log -1
-    - echo "GIT Describe:" && git describe
-    - echo "packaging/version:" && cat packaging/version
-    - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs
-    - .travis/draft_release.sh
-    git:
-      depth: false
-    after_failure: post_message "TRAVIS_MESSAGE" " Draft release submission failed"
-    # We don't run on release candidates
-    if: tag !~ /(-rc)/
-
-
-
-  # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc)
-  - stage: Nightly operations
-
-    name: Run coverity scan
-    # Just notify people that Nightly ops triggered, use the first step as a hook to do that
-    before_script: post_message "TRAVIS_MESSAGE" "Starting nightly operations" "${NOTIF_CHANNEL}"
-    script: ./coverity-install.sh && ./coverity-scan.sh || echo "Coverity failed :("
-
-  - name: Kickstart files integrity testing (extended)
-    script: ./tests/installer/checksums.sh
-
-  - name: Run labeler on github issues
-    script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA
-
-    # This is generating the changelog for nightly release and publish it
-  - name: Generate nightly changelog
-    before_script: post_message "TRAVIS_MESSAGE" "Starting changelog generation for nightlies" "${NOTIF_CHANNEL}"
-    script: ".travis/nightlies.sh"
-    after_failure: post_message "TRAVIS_MESSAGE" " Nightly changelog generation failed"
-    git:
-      depth: false
-
-
-
-  # This is the nightly execution step
-  #
-  - stage: Nightly release
-
-    name: Build & Publish docker images
-    before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images for nightlies" "${NOTIF_CHANNEL}"
-    script:
-    - echo "GIT Branch:" && git branch
- echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - docker info - - packaging/docker/check_login.sh - - packaging/docker/build.sh - - packaging/docker/publish.sh - after_failure: post_message "TRAVIS_MESSAGE" " Nightly docker image publish failed" - git: - depth: false - env: ALLOW_SOFT_FAILURE_HERE=true - - - name: Create nightly release artifacts, publish to GCS - before_script: post_message "TRAVIS_MESSAGE" "Starting artifacts generation for nightlies" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - .travis/create_artifacts.sh - after_failure: post_message "TRAVIS_MESSAGE" " Nightly artifacts generation failed" - git: - depth: false - before_deploy: - echo "Preparing creds under ${TRAVIS_REPO_SLUG}"; - if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - else - echo "Beta deployment stage in progress"; - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - fi; - deploy: - # Beta storage, used for testing purposes - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-dev-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: ${TRAVIS_REPO_SLUG} - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata" - - # Production storage - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: netdata/netdata - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata" - after_deploy: rm -f .travis/gcs-credentials.json + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Fedora 28" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="28" BUILD_STRING="fedora/28" + - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" + - ALLOW_SOFT_FAILURE_HERE=true + + + + # Opensuse distros build + # + - stage: + <<: *_RPM_TEMPLATE + stage: "Package OpenSuSE 15.1" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.1" + - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" + - ALLOW_SOFT_FAILURE_HERE=true + + + + - stage: + <<: *_RPM_TEMPLATE + stage: "Package OpenSuSE 15.0" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.0" + - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" + - ALLOW_SOFT_FAILURE_HERE=true + # ###### End of packaging workflow section ###### # + # ############################################### # + + + + # We only publish if a TAG has been set during packaging + - stage: Publish for release + + name: Build & Publish docker images + before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git 
describe + - echo "packaging/version:" && cat packaging/version + - packaging/docker/check_login.sh + - echo "Switching to latest master branch, to pick up tagging if any" && git checkout master && git pull + - packaging/docker/build.sh + - packaging/docker/publish.sh + after_failure: post_message "TRAVIS_MESSAGE" " Docker image publishing failed" + git: + depth: false + env: ALLOW_SOFT_FAILURE_HERE=true + # We don't run on release candidates + if: tag !~ /(-rc)/ + + - name: Create release draft + before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs + - .travis/draft_release.sh + git: + depth: false + after_failure: post_message "TRAVIS_MESSAGE" " Draft release submission failed" + # We don't run on release candidates + if: tag !~ /(-rc)/ + + + + # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc) + - stage: Nightly operations + + name: Run coverity scan + # Just notify people that Nightly ops triggered, use the first step as a hook to do that + before_script: post_message "TRAVIS_MESSAGE" "Starting nightly operations" "${NOTIF_CHANNEL}" + script: ./coverity-install.sh && ./coverity-scan.sh || echo "Coverity failed :(" + + - name: Kickstart files integrity testing (extended) + script: ./tests/installer/checksums.sh + + - name: Run labeler on github issues + script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA + + # This is generating the changelog for nightly release and publish it + - name: Generate nightly changelog + before_script: post_message "TRAVIS_MESSAGE" "Starting changelog generation for nightlies" "${NOTIF_CHANNEL}" + script: + - ".travis/nightlies.sh" + - ".travis/check_changelog_last_modification.sh" + after_failure: post_message "TRAVIS_MESSAGE" " Nightly changelog generation failed" + git: + depth: false + + + + # This is the nightly execution step + # + - stage: Nightly release + + name: Build & Publish docker images + before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images for nightlies" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - docker info + - packaging/docker/check_login.sh + - packaging/docker/build.sh + - packaging/docker/publish.sh + after_failure: post_message "TRAVIS_MESSAGE" " Nightly docker image publish failed" + git: + depth: false + env: ALLOW_SOFT_FAILURE_HERE=true + + - name: Create nightly release artifacts, publish to GCS + before_script: post_message "TRAVIS_MESSAGE" "Starting artifacts generation for nightlies" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - .travis/create_artifacts.sh + after_failure: post_message "TRAVIS_MESSAGE" " Nightly artifacts generation failed" + git: + depth: false + before_deploy: + echo "Preparing creds under ${TRAVIS_REPO_SLUG}"; + if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then + openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in 
.travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; + else + echo "Beta deployment stage in progress"; + openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; + fi; + deploy: + # Beta storage, used for testing purposes + - provider: gcs + edge: + branch: gcs-ng + project_id: netdata-storage + credentials: .travis/gcs-credentials.json + bucket: "netdata-dev-nightlies" + skip_cleanup: true + local_dir: "artifacts" + on: + # Only deploy on netdata/netdata, master branch, when artifacts directory is created + repo: ${TRAVIS_REPO_SLUG} + branch: master + condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata" + + # Production storage + - provider: gcs + edge: + branch: gcs-ng + project_id: netdata-storage + credentials: .travis/gcs-credentials.json + bucket: "netdata-nightlies" + skip_cleanup: true + local_dir: "artifacts" + on: + # Only deploy on netdata/netdata, master branch, when artifacts directory is created + repo: netdata/netdata + branch: master + condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata" + after_deploy: rm -f .travis/gcs-credentials.json diff --git a/.travis/README.md b/.travis/README.md index b7b61ecb4..3b314fa18 100644 --- a/.travis/README.md +++ b/.travis/README.md @@ -138,6 +138,6 @@ We plan to support amd64, x86 and arm64 architectures. As of June 2019 only amd6 The Package deployment can be triggered manually by executing an empty commit with the following message pattern: `[Package PACKAGE_TYPE PACKAGE_ARCH] DESCRIBE_THE_REASONING_HERE`. Travis Yaml configuration allows the user to combine package type and architecture as necessary to regenerate the current stable release (For example tag v1.15.0 as of 4th of May 2019) -Sample patterns to trigger building of packages for all AMD64 supported architecture: -- '[Package AMD64 RPM]': Build & publish all amd64 available RPM packages -- '[Package AMD64 DEB]': Build & publish all amd64 available DEB packages +Sample patterns to trigger building of packages for all amd64 supported architecture: +- '[Package amd64 RPM]': Build & publish all amd64 available RPM packages +- '[Package amd64 DEB]': Build & publish all amd64 available DEB packages diff --git a/.travis/check_changelog_last_modification.sh b/.travis/check_changelog_last_modification.sh new file mode 100755 index 000000000..2665c0627 --- /dev/null +++ b/.travis/check_changelog_last_modification.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -e + +LAST_MODIFICATION="$(git log -1 --pretty="format:%at" CHANGELOG.md)" +CURRENT_TIME="$(date +"%s")" +TWO_DAYS_IN_SECONDS=172800 + +DIFF=$((CURRENT_TIME - LAST_MODIFICATION)) + +echo "Checking CHANGELOG.md last modification time on GIT.." +echo "CHANGELOG.md timestamp: ${LAST_MODIFICATION}" +echo "Current timestamp: ${CURRENT_TIME}" +echo "Diff: ${DIFF}" + +if [ ${DIFF} -gt ${TWO_DAYS_IN_SECONDS} ]; then + echo "CHANGELOG.md is more than two days old!" 
+ post_message "TRAVIS_MESSAGE" "Hi , CHANGELOG.md was found more than two days old (Diff: ${DIFF} seconds)" "${NOTIF_CHANNEL}" +else + echo "CHANGELOG.md is less than two days old, fine" +fi diff --git a/.travis/generate_changelog_for_nightlies.sh b/.travis/generate_changelog_for_nightlies.sh index 68491fa9b..b90862880 100755 --- a/.travis/generate_changelog_for_nightlies.sh +++ b/.travis/generate_changelog_for_nightlies.sh @@ -49,6 +49,7 @@ docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest --token "${GITHUB_TOKEN}" \ --since-tag "v1.10.0" \ --unreleased-label "**Next release**" \ + --no-issues \ --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \ --no-compare-link ${OPTS} diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh index 188b37da0..002461041 100755 --- a/.travis/nightlies.sh +++ b/.travis/nightlies.sh @@ -38,6 +38,12 @@ if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then fi echo "--- Running Changelog generation ---" -.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || echo "Changelog generation has failed, this is a soft error, process continues" +NIGHTLIES_CHANGELOG_FAILED=0 +.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || NIGHTLIES_CHANGELOG_FAILED=1 + +if [ ${NIGHTLIES_CHANGELOG_FAILED} -eq 1 ]; then + echo "Changelog generation has failed, this is a soft error, process continues" + post_message "TRAVIS_MESSAGE" "Changelog generation job for nightlies failed, possibly due to github issues" "${NOTIF_CHANNEL}" +fi exit "${FAIL}" diff --git a/.travis/package_management/build.sh b/.travis/package_management/build.sh new file mode 100644 index 000000000..beb522a35 --- /dev/null +++ b/.travis/package_management/build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +UNPACKAGED_NETDATA_PATH="$1" +LATEST_RELEASE_VERSION="$2" + +if [ -z "${LATEST_RELEASE_VERSION}" ]; then + echo "Parameter 'LATEST_RELEASE_VERSION' not defined" + exit 1 +fi + +if [ -z "${UNPACKAGED_NETDATA_PATH}" ]; then + echo "Parameter 'UNPACKAGED_NETDATA_PATH' not defined" + exit 1 +fi + +echo "Running changelog generation mechanism since ${LATEST_RELEASE_VERSION}" + +echo "Entering ${UNPACKAGED_NETDATA_PATH}" +cd "${UNPACKAGED_NETDATA_PATH}" + +echo "Linking debian -> contrib/debian" +ln -sf contrib/debian debian + +echo "Executing dpkg-buildpackage" +if dpkg-buildpackage --version 2> /dev/null | grep -q "1.18"; then + dpkg-buildpackage --post-clean --pre-clean --build=binary +else + dpkg-buildpackage -b +fi + +echo "DEB build script completed!" diff --git a/.travis/package_management/common.py b/.travis/package_management/common.py index 6cf59293d..6e7a26023 100755 --- a/.travis/package_management/common.py +++ b/.travis/package_management/common.py @@ -6,6 +6,22 @@ import lxc import subprocess +import os + +def fetch_version(orig_build_version): + tag = None + friendly_version = "" + + # TODO: Checksum validations + if str(orig_build_version).count(".latest") == 1: + version_list=str(orig_build_version).replace('v', '').split('.') + friendly_version='.'.join(version_list[0:2]) + "." 
+ version_list[3] + else: + friendly_version = orig_build_version.replace('v', '') + tag = friendly_version # Go to stable tag + print("Version set to %s from %s" % (friendly_version, orig_build_version)) + + return friendly_version, tag def replace_tag(tag_name, spec, new_tag_content): print("Fixing tag %s in %s" % (tag_name, spec)) @@ -44,3 +60,103 @@ def run_command_in_host(cmd): print('Output: ' + o.decode('ascii')) print('Error: ' + e.decode('ascii')) print('code: ' + str(proc.returncode)) + +def prepare_repo(container): + if str(os.environ["REPO_TOOL"]).count("zypper") == 1: + run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"]) + run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"]) + + elif str(os.environ["REPO_TOOL"]).count("yum") == 1: + run_command(container, [os.environ["REPO_TOOL"], "clean", "all"]) + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + + if os.environ["BUILD_STRING"].count("el/7") == 1 and os.environ["BUILD_ARCH"].count("i386") == 1: + print ("Skipping epel-release install for %s-%s" % (os.environ["BUILD_STRING"], os.environ["BUILD_ARCH"])) + else: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"]) + + elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + else: + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"]) + +def install_common_dependendencies(container): + if str(os.environ["REPO_TOOL"]).count("zypper") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c"]) + + elif str(os.environ["REPO_TOOL"]).count("yum") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + + elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "g++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libipmimonitoring-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libjson-c-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) + run_command(container, 
[os.environ["REPO_TOOL"], "install", "-y", "libsnappy-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotobuf-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotoc-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + if os.environ["BUILD_STRING"].count("debian/jessie") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy"]) + else: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + + if os.environ["BUILD_STRING"].count("el/6") <= 0: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autogen"]) + +def prepare_version_source(dest_archive, pkg_friendly_version, tag=None): + print(".0 Preparing local implementation tarball for version %s" % pkg_friendly_version) + tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive + + if tag is not None: + print(".1 Checking out tag %s" % tag) + run_command_in_host(['git', 'fetch', '--all']) + + # TODO: Keep in mind that tricky 'v' there, needs to be removed once we clear our versioning scheme + run_command_in_host(['git', 'checkout', 'v%s' % pkg_friendly_version]) + + print(".2 Tagging the code with version: %s" % pkg_friendly_version) + run_command_in_host(['git', 'tag', '-a', pkg_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]]) + + print(".3 Run autoreconf -ivf") + run_command_in_host(['autoreconf', '-ivf']) + + print(".4 Run configure") + run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata']) + + print(".5 Run make dist") + run_command_in_host(['make', 'dist']) + + print(".6 Copy generated tarbal to desired path") + if os.path.exists('netdata-%s.tar.gz' % pkg_friendly_version): + run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % pkg_friendly_version, tar_file]) + + print(".7 Fixing permissions on tarball") + run_command_in_host(['sudo', 'chmod', '777', tar_file]) + else: + print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % pkg_friendly_version) + sys.exit(1) diff --git a/.travis/package_management/configure_deb_lxc_environment.py b/.travis/package_management/configure_deb_lxc_environment.py index 58999ad38..12328dde7 100755 --- a/.travis/package_management/configure_deb_lxc_environment.py +++ b/.travis/package_management/configure_deb_lxc_environment.py @@ -39,27 +39,57 @@ print("Waiting for container connectivity to start configuration sequence") if not container.get_ips(timeout=30): raise Exception("Timeout while waiting for container") +build_path = "/home/%s" % os.environ['BUILDER_NAME'] + # Run the required activities now # 1. Create the builder user print("1. 
Adding user %s" % os.environ['BUILDER_NAME']) common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) # Fetch package dependencies for the build -print("2. Installing package dependencies within LXC container") -common.run_command(container, ["apt-get", "update", "-y"]) -common.run_command(container, ["apt-get", "install", "-y", "sudo"]) -common.run_command(container, ["apt-get", "install", "-y", "wget"]) -common.run_command(container, ["apt-get", "install", "-y", "bash"]) -common.run_command(container, ["wget", "-T", "15", "-O", "~/.install-required-packages.sh", "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "~/.install-required-packages.sh", "netdata", "--dont-wait", "--non-interactive"]) - -# Download the source -dest_archive="/home/%s/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'],os.environ['BUILD_VERSION']) -release_url="https://github.com/netdata/netdata/releases/download/%s/netdata-%s.tar.gz" % (os.environ['BUILD_VERSION'], os.environ['BUILD_VERSION']) -print("3. Fetch netdata source (%s -> %s)" % (release_url, dest_archive)) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "wget", "-T", "15", "--output-document=" + dest_archive, release_url]) - -print("4. Extracting directory contents to /home " + os.environ['BUILDER_NAME']) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", dest_archive, "-C", "/home/" + os.environ['BUILDER_NAME']]) +print("2. Preparing repo on LXC container") +common.prepare_repo(container) + +print("2.1 Install .DEB build support packages") +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dpkg-dev"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libdistro-info-perl"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-make"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-systemd"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-autoreconf"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "git-buildpackage"]) + +print("2.2 Add more dependencies") +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter-acct-dev"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) + +print ("3.1 Run install-required-packages scriptlet") +common.run_command(container, ["wget", "-T", "15", "-O", "%s/.install-required-packages.sh" % build_path, "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) +common.run_command(container, ["bash", "%s/.install-required-packages.sh" % build_path, "netdata", "--dont-wait", "--non-interactive"]) + +print("3.2 Installing package dependencies within LXC container") +common.install_common_dependendencies(container) + +friendly_version="" +dest_archive="" +download_url="" +tag = None +friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) + +tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), friendly_version) + +print("5. I will be building version '%s' of netdata." 
% os.environ['BUILD_VERSION']) +dest_archive="%s/netdata-%s.tar.gz" % (build_path, friendly_version) + +if str(os.environ["BUILD_STRING"]).count("debian/jessie") == 1: + print("5.1 We are building for Jessie, adjusting control file") + common.run_command_in_host(['sudo', 'rm', 'contrib/debian/control']) + common.run_command_in_host(['sudo', 'cp', 'contrib/debian/control.jessie', 'contrib/debian/control']) + +common.prepare_version_source(dest_archive, friendly_version, tag=tag) + +print("6. Installing build.sh script to build path") +common.run_command_in_host(['sudo', 'cp', '.travis/package_management/build.sh', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) +common.run_command_in_host(['sudo', 'chmod', '777', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) +common.run_command_in_host(['sudo', 'ln', '-sf', 'contrib/debian', 'debian']) print("Done!") diff --git a/.travis/package_management/configure_rpm_lxc_environment.py b/.travis/package_management/configure_rpm_lxc_environment.py index 644e027b7..79d34608f 100755 --- a/.travis/package_management/configure_rpm_lxc_environment.py +++ b/.travis/package_management/configure_rpm_lxc_environment.py @@ -46,30 +46,15 @@ print("1. Adding user %s" % os.environ['BUILDER_NAME']) common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) # Fetch package dependencies for the build -print("2. Installing package dependencies within LXC container") -if str(os.environ["REPO_TOOL"]).count("zypper") == 1: - common.run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"]) - common.run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"]) - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"]) - -elif str(os.environ["REPO_TOOL"]).count("yum") == 1: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - common.run_command(container, [os.environ["REPO_TOOL"], "clean", "all"]) - common.run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"]) -else: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - common.run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) +print("2.1 Preparing repo on LXC container") +common.prepare_repo(container) + +common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) +common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"]) # Exceptional cases, not available everywhere # - +print("2.2 Running uncommon dependencies and preparing LXC environment") # Not on Centos-7 if os.environ["BUILD_STRING"].count("el/7") <= 0: common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter_acct-devel"]) @@ 
-78,8 +63,8 @@ if os.environ["BUILD_STRING"].count("el/7") <= 0: if os.environ["BUILD_STRING"].count("el/6") <= 0: common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autoconf-archive"]) -common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"]) +print("2.3 Installing common dependencies") +common.install_common_dependendencies(container) print("3. Setting up macros") common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "/bin/echo", "'%_topdir %(echo /home/" + os.environ['BUILDER_NAME'] + ")/rpmbuild' > /home/" + os.environ['BUILDER_NAME'] + "/.rpmmacros"]) @@ -97,63 +82,21 @@ rpm_friendly_version="" dest_archive="" download_url="" spec_file="/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME'] +tag = None +rpm_friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) +tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version) -# TODO: Checksum validations -if str(os.environ['BUILD_VERSION']).count(".latest") == 1: - version_list=str(os.environ['BUILD_VERSION']).replace('v', '').split('.') - rpm_friendly_version='.'.join(version_list[0:3]) + "." + version_list[3] - - print("Building latest nightly version of netdata..(%s)" % os.environ['BUILD_VERSION']) - dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version) - - print("5. Preparing local latest implementation tarball for version %s" % rpm_friendly_version) - tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive - - print("5.1 Tagging the code with latest version: %s" % rpm_friendly_version) - common.run_command_in_host(['git', 'tag', '-a', rpm_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]]) - - print("5.2 Run autoreconf -ivf") - common.run_command_in_host(['autoreconf', '-ivf']) - - print("5.3 Run configure") - common.run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata']) - - print("5.4 Run make dist") - common.run_command_in_host(['make', 'dist']) - - print("5.5 Copy generated tarbal to desired path") - if os.path.exists('netdata-%s.tar.gz' % rpm_friendly_version): - common.run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % rpm_friendly_version, tar_file]) - - print("5.6 Fixing permissions on tarball") - common.run_command_in_host(['sudo', 'chmod', '777', tar_file]) - else: - print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % rpm_friendly_version) - sys.exit(1) - - # Extract the spec file in place - print("6. Extract spec file from the source") - common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - - print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file)) - common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) -else: - rpm_friendly_version = os.environ['BUILD_VERSION'] - - print("Building latest stable version of netdata.. 
(%s)" % os.environ['BUILD_VERSION']) - dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'],os.environ['BUILD_VERSION']) - download_url="https://github.com/netdata/netdata/releases/download/%s/netdata-%s.tar.gz" % (os.environ['BUILD_VERSION'], os.environ['BUILD_VERSION']) +print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION']) +dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version) - print("5. Fetch netdata source into the repo structure(%s -> %s)" % (download_url, dest_archive)) - tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version) - common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "wget", "-T", "15", "--output-document=" + dest_archive, download_url]) +common.prepare_version_source(dest_archive, rpm_friendly_version, tag=tag) - print("6.Extract spec file from the source") - common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "--to-command=cat > %s" % spec_file, "-xvf", dest_archive, "netdata-%s/netdata.spec" % os.environ['BUILD_VERSION']]) +# Extract the spec file in place +print("6. Extract spec file from the source") +common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) +common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - print("7. Temporary hack: Adjust version string on the spec file (%s) to %s and Source0 to %s" % (os.environ['LXC_CONTAINER_ROOT'] + spec_file, rpm_friendly_version, download_url)) - common.replace_tag("Version", os.environ['LXC_CONTAINER_ROOT'] + spec_file, rpm_friendly_version) - common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) +print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file)) +common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) print('Done!') diff --git a/.travis/package_management/create_lxc_for_build.sh b/.travis/package_management/create_lxc_for_build.sh index ae855a742..d733687a8 100755 --- a/.travis/package_management/create_lxc_for_build.sh +++ b/.travis/package_management/create_lxc_for_build.sh @@ -83,7 +83,7 @@ case "${BUILD_ARCH}" in .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" ;; "i386"|"amd64"|"arm64") - # AMD64 or i386 + # amd64 or i386 echo "Creating LXC Container for ${BUILD_ARCH}.." export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}" export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" diff --git a/.travis/package_management/functions.sh b/.travis/package_management/functions.sh index 9a467ffe1..0c4425fa5 100644 --- a/.travis/package_management/functions.sh +++ b/.travis/package_management/functions.sh @@ -10,7 +10,7 @@ set -e function detect_arch_from_commit { case "${TRAVIS_COMMIT_MESSAGE}" in - "[Package AMD64"*) + "[Package amd64"*) export BUILD_ARCH="amd64" ;; "[Package i386"*) @@ -24,7 +24,7 @@ function detect_arch_from_commit { ;; *) - echo "Unknown build architecture '${BUILD_ARCH}' provided" + echo "Unknown build architecture in '${TRAVIS_COMMIT_MESSAGE}'. 
No BUILD_ARCH can be provided" exit 1 ;; esac diff --git a/.travis/package_management/prepare_packages.sh b/.travis/package_management/prepare_packages.sh index 1fb26a95e..12ed07cc7 100755 --- a/.travis/package_management/prepare_packages.sh +++ b/.travis/package_management/prepare_packages.sh @@ -27,29 +27,36 @@ for d in ${CREATED_CONTAINERS[@]}; do # Pick up any RPMS from builder RPM_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}/rpmbuild" - echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS" - - if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then - echo "Copying any RPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/RPMS" - [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}" - fi - - if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then - echo "Copying any SRPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/SRPMS" - [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}" + if [ -d "${RPM_BUILD_PATH}" ]; then + echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS" + + if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then + echo "Copying any RPMS in '${RPM_BUILD_PATH}', copying over the following:" + ls -ltrR "${RPM_BUILD_PATH}/RPMS" + [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}" + fi + + if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then + echo "Copying any SRPMS in '${RPM_BUILD_PATH}', copying over the following:" + ls -ltrR "${RPM_BUILD_PATH}/SRPMS" + [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}" + fi + else + DEB_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}" + echo "Checking folder ${DEB_BUILD_PATH} for DEB packages" + if [ -d "${DEB_BUILD_PATH}" ]; then + cp "${DEB_BUILD_PATH}"/netdata*.ddeb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .ddeb files" + cp "${DEB_BUILD_PATH}"/netdata*.deb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .deb files" + cp "${DEB_BUILD_PATH}"/netdata*.buildinfo "${PACKAGES_DIRECTORY}" || echo "Could not copy any .buildinfo files" + cp "${DEB_BUILD_PATH}"/netdata*.changes "${PACKAGES_DIRECTORY}" || echo "Could not copy any .changes files" + else + echo "Folder ${DEB_BUILD_PATH} does not exist or not a directory, nothing to do for package preparation" + fi fi - - # Pick up any DEBs from builder - DEB_BUILD_PATH="${d}/home/${BUILDER_NAME}/build-area" - echo "Checking folder ${DEB_BUILD_PATH} for DEB packages" - #TODO: During debian clean up we 'll fill this up - done chmod -R 777 "${PACKAGES_DIRECTORY}" diff --git 
a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py index 3040bdd62..a0235a73d 100755 --- a/.travis/package_management/trigger_deb_lxc_build.py +++ b/.travis/package_management/trigger_deb_lxc_build.py @@ -37,15 +37,37 @@ if not container.running or not container.state == "RUNNING": if not container.get_ips(timeout=30): raise Exception("Timeout while waiting for container") +build_path = "/home/%s" % os.environ['BUILDER_NAME'] + print("Setting up EMAIL and DEBFULLNAME variables required by the build tools") os.environ["EMAIL"] = "bot@netdata.cloud" os.environ["DEBFULLNAME"] = "Netdata builder" # Run the build process on the container -print("Starting DEB build process, running dh-make") -new_version = os.environ["BUILD_VERSION"].replace('v', '') +new_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) +print("Starting DEB build process for version %s" % new_version) + +netdata_tarball = "%s/netdata-%s.tar.gz" % (build_path, new_version) +unpacked_netdata = netdata_tarball.replace(".tar.gz", "") + +print("Extracting tarball %s" % netdata_tarball) +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", netdata_tarball, "-C", build_path]) + +print("Fixing changelog tags") +changelog_in_host = "contrib/debian/changelog" +common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_VERSION/%s-1/g' % os.environ["LATEST_RELEASE_VERSION"].replace("v", ""), changelog_in_host]) +common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_DATE/%s/g' % os.environ["LATEST_RELEASE_DATE"], changelog_in_host]) + +print("Executing gbp dch command..") +common.run_command_in_host(['gbp', 'dch', '--release', '--ignore-branch', '--spawn-editor=snapshot', '--since=%s' % os.environ["LATEST_RELEASE_VERSION"], '--new-version=%s' % new_version]) + +print("Copying over changelog to the destination machine") +common.run_command_in_host(['sudo', 'cp', 'debian/changelog', "%s/%s/netdata-%s/contrib/debian/" % (os.environ['LXC_CONTAINER_ROOT'], build_path, new_version)]) + +print("Running debian build script since %s" % os.environ["LATEST_RELEASE_VERSION"]) +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "%s/build.sh" % build_path, unpacked_netdata, new_version]) -print("Building the package") -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "dpkg-buildpackage", "--host-arch", "amd64", "--target-arch", "amd64", "--post-clean", "--pre-clean", "--build=binary", "--release-by=\"Netdata Builder\"", "--build-by=\"Netdata Builder\""]) +print("Listing contents on build path") +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltr", build_path]) print('Done!') diff --git a/.travis/package_management/yank_stale_rpm.sh b/.travis/package_management/yank_stale_rpm.sh index 5cf938664..3f7669712 100755 --- a/.travis/package_management/yank_stale_rpm.sh +++ b/.travis/package_management/yank_stale_rpm.sh @@ -21,7 +21,7 @@ fi PACKAGES_DIR="$1" DISTRO="$2" -PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep '\.rpm')" +PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep -e '\.rpm' -e '\.deb' -e '\.ddeb' )" if [ ! -d "${PACKAGES_DIR}" ] || [ -z "${PACKAGES_LIST}" ]; then echo "Folder ${PACKAGES_DIR} does not seem to be a valid directory or is empty. 
No packages to check for yanking" diff --git a/CHANGELOG.md b/CHANGELOG.md index ed1542517..d66fc666d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,126 @@ # Changelog +## [v1.16.1](https://github.com/netdata/netdata/tree/v1.16.1) (2019-07-31) + +**Fixed bugs:** + +- /etc/netdata/.environment gets updated constantly with auto-updates [\#6550](https://github.com/netdata/netdata/issues/6550) +- pluginsd python.d slowly eating more and more CPU due to smartd collection [\#6532](https://github.com/netdata/netdata/issues/6532) +- Special characters in configuration files can break the UI [\#6531](https://github.com/netdata/netdata/issues/6531) +- Plugin httpcheck heavily changes the WebUI [\#6530](https://github.com/netdata/netdata/issues/6530) +- Valid let's encrypt certifcate considerated as invalid by streaming client [\#6529](https://github.com/netdata/netdata/issues/6529) +- Can't start netdata in Amazon Linux [\#6522](https://github.com/netdata/netdata/issues/6522) +- Missing file when install the netdata [\#6519](https://github.com/netdata/netdata/issues/6519) +- netdata tengine invalid response length [\#6490](https://github.com/netdata/netdata/issues/6490) +- Snappy library is not detected correctly in all Linux distributions [\#6478](https://github.com/netdata/netdata/issues/6478) +- python sensors collector: sensors chips filtering doesnt work [\#6462](https://github.com/netdata/netdata/issues/6462) +- Streaming not working with ^SSL option in bind to = [\#6457](https://github.com/netdata/netdata/issues/6457) +- fix CRC error handling in dbengine [\#6451](https://github.com/netdata/netdata/issues/6451) +- python.d ERROR: unbound\[local\] : \[Errno 57\] Socket is not connected FreeBSD [\#6434](https://github.com/netdata/netdata/issues/6434) +- Issue with rethinkdbs plugin [\#6429](https://github.com/netdata/netdata/issues/6429) +- Double free or corruption \(again\) [\#6412](https://github.com/netdata/netdata/issues/6412) +- rpm version should be 1.16.0 not v1.16.0 \(extra "v"\) [\#6409](https://github.com/netdata/netdata/issues/6409) +- Netdata does not detect pkg-config under automated install [\#6405](https://github.com/netdata/netdata/issues/6405) +- No LVM disk space usage on CentOS [\#6401](https://github.com/netdata/netdata/issues/6401) +- Cannot see charts in an imported snapshot [\#6384](https://github.com/netdata/netdata/issues/6384) +- netdata does not send notifications for alarms which fail with \(errno 12, Out of memory\) [\#6335](https://github.com/netdata/netdata/issues/6335) +- netdata/packaging: Fine tune documentation regarding package dependencies per distribution [\#6300](https://github.com/netdata/netdata/issues/6300) +- netdata/web: layout getting messed up on certain dimensions when resizing the window [\#6269](https://github.com/netdata/netdata/issues/6269) +- charts.d kills process twice [\#6190](https://github.com/netdata/netdata/issues/6190) +- MacOS Path Issues [\#6165](https://github.com/netdata/netdata/issues/6165) +- Memory leak in power supply module [\#6132](https://github.com/netdata/netdata/issues/6132) +- Question for the Netdata-Dashboard [\#6037](https://github.com/netdata/netdata/issues/6037) +- Fatal errors sometimes fail to halt the netdata daemon [\#5896](https://github.com/netdata/netdata/issues/5896) +- my-netdata menu dynamic sizing [\#5812](https://github.com/netdata/netdata/issues/5812) +- \[Security\] Docker socket exported writable; better use docker socket proxy [\#5680](https://github.com/netdata/netdata/issues/5680) +- Cant 
see user name when run netdata in docker [\#5585](https://github.com/netdata/netdata/issues/5585) +- No netdata-updater.sh in cron.daily after installing with kickstart-static64.sh [\#4122](https://github.com/netdata/netdata/issues/4122) +- If trailing slash is not included dashboard.js fails to load for slave dashboard for master [\#3820](https://github.com/netdata/netdata/issues/3820) +- Alarm "system.softnet\_stat" is very strict. [\#1076](https://github.com/netdata/netdata/issues/1076) +- netdata/packaging: Move tarball checksum information into lib dir of netdata [\#6555](https://github.com/netdata/netdata/pull/6555) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging: Adopt netdata-updater to run properly for static64 installations. [\#6520](https://github.com/netdata/netdata/pull/6520) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging: Do not deliver edit-config as part of the distribution tarball [\#6507](https://github.com/netdata/netdata/pull/6507) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Stop anonymous stats from writing log in /tmp [\#6491](https://github.com/netdata/netdata/pull/6491) ([cakrit](https://github.com/cakrit)) +- netdata/packaging: Fix RPM packaging workflow issues, plus draft changes for .DEB packaging [\#6415](https://github.com/netdata/netdata/pull/6415) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) + +**Closed issues:** + +- Nodes Views redirects to the agent instead of the nodes view upon login [\#6542](https://github.com/netdata/netdata/issues/6542) +- adding node.js for building web server [\#6521](https://github.com/netdata/netdata/issues/6521) +- Support for ScaleIO/VxFlexOS v3 [\#6475](https://github.com/netdata/netdata/issues/6475) +- Disable HTML email for alarms [\#6458](https://github.com/netdata/netdata/issues/6458) +- netdata/packaging: Make go.d plugin an independent module [\#6367](https://github.com/netdata/netdata/issues/6367) +- Global option for enabling charts with zero metrics [\#6315](https://github.com/netdata/netdata/issues/6315) +- Netdata variable granularity support in netdata daemon - with basic unit testing [\#6255](https://github.com/netdata/netdata/issues/6255) +- Netdata variable granularity support in dbengine [\#6254](https://github.com/netdata/netdata/issues/6254) +- Alarms on system boot [\#6114](https://github.com/netdata/netdata/issues/6114) +- \[Binary releases\] Create a script that will containerise the DEB build process [\#5968](https://github.com/netdata/netdata/issues/5968) +- health\_alarm\_notify Being Overwritten [\#5669](https://github.com/netdata/netdata/issues/5669) +- HTML sanitizer for dashboard\_info.js [\#5652](https://github.com/netdata/netdata/issues/5652) +- badge do not suppport other language [\#3117](https://github.com/netdata/netdata/issues/3117) +- Feature Request: Linux zram device statistics. [\#2578](https://github.com/netdata/netdata/issues/2578) +- Provide an include configuration mechanism [\#2360](https://github.com/netdata/netdata/issues/2360) +- Authentication support [\#70](https://github.com/netdata/netdata/issues/70) + +**Merged pull requests:** + +- Handle disconnected sockets in unbound collector. 
[\#6561](https://github.com/netdata/netdata/pull/6561) ([Ferroin](https://github.com/Ferroin)) +- netdata/packaging: Notify us when CHANGELOG.md gets too old [\#6556](https://github.com/netdata/netdata/pull/6556) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add configurable default locations for trusted CA certificates [\#6549](https://github.com/netdata/netdata/pull/6549) ([thiagoftsm](https://github.com/thiagoftsm)) +- smartd\_log: use `del\_dimension` instead of `hide\_dimension` to delete inactive disks [\#6547](https://github.com/netdata/netdata/pull/6547) ([ilyam8](https://github.com/ilyam8)) +- redirect after clicking Nodes \> SignIn [\#6544](https://github.com/netdata/netdata/pull/6544) ([jacekkolasa](https://github.com/jacekkolasa)) +- smartd\_log: Disk \_\_eq\_\_ fix [\#6540](https://github.com/netdata/netdata/pull/6540) ([ilyam8](https://github.com/ilyam8)) +- minor - code readability HTTP CODES as defines && clear warnings [\#6539](https://github.com/netdata/netdata/pull/6539) ([underhood](https://github.com/underhood)) +- \[ci skip\] minor/vanity - add self.name\(\) to contrib.md [\#6538](https://github.com/netdata/netdata/pull/6538) ([underhood](https://github.com/underhood)) +- Docs: Remove text about nightly version [\#6534](https://github.com/netdata/netdata/pull/6534) ([joelhans](https://github.com/joelhans)) +- Documentation navigation fix [\#6533](https://github.com/netdata/netdata/pull/6533) ([joelhans](https://github.com/joelhans)) +- .travis.yml: Fix some yamllint errors [\#6526](https://github.com/netdata/netdata/pull/6526) ([knatsakis](https://github.com/knatsakis)) +- mongodb: change `password` to `pass` in the module config [\#6518](https://github.com/netdata/netdata/pull/6518) ([ilyam8](https://github.com/ilyam8)) +- Fixed broken left navbar links in translated docs [\#6505](https://github.com/netdata/netdata/pull/6505) ([cakrit](https://github.com/cakrit)) +- Update CLA with intention to keep netdata FOSS [\#6504](https://github.com/netdata/netdata/pull/6504) ([cakrit](https://github.com/cakrit)) +- netdata/docs: Add @joelhans as co-owner on documentation [\#6501](https://github.com/netdata/netdata/pull/6501) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add support for plain text only emails [\#6485](https://github.com/netdata/netdata/pull/6485) ([leo-lb](https://github.com/leo-lb)) +- netdata/packaging: Enable built-in support for prometheus remote write in packaging [\#6480](https://github.com/netdata/netdata/pull/6480) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Fix the snappy library check [\#6479](https://github.com/netdata/netdata/pull/6479) ([vlvkobal](https://github.com/vlvkobal)) +- Add a statement about permissions for the diskspace plugin [\#6474](https://github.com/netdata/netdata/pull/6474) ([vlvkobal](https://github.com/vlvkobal)) +- Get user and group names from files [\#6472](https://github.com/netdata/netdata/pull/6472) ([vlvkobal](https://github.com/vlvkobal)) +- Fix parsing SSL ACL along with others [\#6468](https://github.com/netdata/netdata/pull/6468) ([thiagoftsm](https://github.com/thiagoftsm)) +- update Nginx guide with changes [\#6466](https://github.com/netdata/netdata/pull/6466) ([prhomhyse](https://github.com/prhomhyse)) +- netdata/packaging: Binary distributions - clean up .DEB package generation process [\#6465](https://github.com/netdata/netdata/pull/6465) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- python sensors collector: sensor chips filtering fix 
[\#6463](https://github.com/netdata/netdata/pull/6463) ([ilyam8](https://github.com/ilyam8)) +- Fix broken links [\#6461](https://github.com/netdata/netdata/pull/6461) ([TheLovinator1](https://github.com/TheLovinator1)) +- json function could create overflow [\#6460](https://github.com/netdata/netdata/pull/6460) ([thiagoftsm](https://github.com/thiagoftsm)) +- Fix nodes menu sizing \(responsive\) [\#6455](https://github.com/netdata/netdata/pull/6455) ([builat](https://github.com/builat)) +- Add netdata haproxy documentation page [\#6454](https://github.com/netdata/netdata/pull/6454) ([johnramsden](https://github.com/johnramsden)) +- Fix CRC and I/O error handling in dbengine [\#6452](https://github.com/netdata/netdata/pull/6452) ([mfundul](https://github.com/mfundul)) +- Stop docs icon from linking to streaming page instead of docs root [\#6445](https://github.com/netdata/netdata/pull/6445) ([joelhans](https://github.com/joelhans)) +- Add more supported backends to the documentation [\#6443](https://github.com/netdata/netdata/pull/6443) ([vlvkobal](https://github.com/vlvkobal)) +- netdata/packaging: Remove Ventureer from demo sites [\#6442](https://github.com/netdata/netdata/pull/6442) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Safer container names [\#6441](https://github.com/netdata/netdata/pull/6441) ([ViViDboarder](https://github.com/ViViDboarder)) +- Update docs health monitoring and health management api [\#6435](https://github.com/netdata/netdata/pull/6435) ([jghaanstra](https://github.com/jghaanstra)) +- Fix issue with HTML docs generation [\#6433](https://github.com/netdata/netdata/pull/6433) ([cakrit](https://github.com/cakrit)) +- rethinkdb collector new driver support [\#6431](https://github.com/netdata/netdata/pull/6431) ([ilyam8](https://github.com/ilyam8)) +- New 'homepage' for documentation site [\#6428](https://github.com/netdata/netdata/pull/6428) ([joelhans](https://github.com/joelhans)) +- Utf8 Badge Fix And URL Parser International Support \(initial\) [\#6426](https://github.com/netdata/netdata/pull/6426) ([underhood](https://github.com/underhood)) +- Styling improvements to documentation [\#6425](https://github.com/netdata/netdata/pull/6425) ([joelhans](https://github.com/joelhans)) +- Netdata/packaging: Add documentation for binary packages, plus draft table for distributions support [\#6422](https://github.com/netdata/netdata/pull/6422) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging/doc: Update documentation dependencies [\#6421](https://github.com/netdata/netdata/pull/6421) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add global configuration option for zero metrics [\#6419](https://github.com/netdata/netdata/pull/6419) ([vlvkobal](https://github.com/vlvkobal)) +- Updated logos in the infographic and remaining favicons [\#6417](https://github.com/netdata/netdata/pull/6417) ([cakrit](https://github.com/cakrit)) +- SSL vs. 
TLS consistency and clarification in documentation [\#6414](https://github.com/netdata/netdata/pull/6414) ([joelhans](https://github.com/joelhans)) +- Add more codeowners to the core [\#6413](https://github.com/netdata/netdata/pull/6413) ([vlvkobal](https://github.com/vlvkobal)) +- Add news of v1.16.0 to main README [\#6411](https://github.com/netdata/netdata/pull/6411) ([cakrit](https://github.com/cakrit)) +- Update Running-behind-apache.md [\#6406](https://github.com/netdata/netdata/pull/6406) ([Steve8291](https://github.com/Steve8291)) +- Fix Web API Health documentation [\#6404](https://github.com/netdata/netdata/pull/6404) ([thiagoftsm](https://github.com/thiagoftsm)) +- Snapshot uniqueId fix [\#6400](https://github.com/netdata/netdata/pull/6400) ([jacekkolasa](https://github.com/jacekkolasa)) +- Make use of GCC's \_\_attribute\_\_\(\(unused\)\) [\#6392](https://github.com/netdata/netdata/pull/6392) ([ac000](https://github.com/ac000)) +- Change default installation to stable in documentation [\#6388](https://github.com/netdata/netdata/pull/6388) ([joelhans](https://github.com/joelhans)) +- Daemon fix double kills of collection threads on shutdown [\#6387](https://github.com/netdata/netdata/pull/6387) ([emmrk](https://github.com/emmrk)) +- Add apps grouping debug messages [\#6375](https://github.com/netdata/netdata/pull/6375) ([vlvkobal](https://github.com/vlvkobal)) +- Reimplemented mypopen\(\) function family [\#6339](https://github.com/netdata/netdata/pull/6339) ([mfundul](https://github.com/mfundul)) +- ZRAM info collector module \(proc.plugin\) [\#6276](https://github.com/netdata/netdata/pull/6276) ([RaZeR-RBI](https://github.com/RaZeR-RBI)) +- Url parser refactoring [\#6247](https://github.com/netdata/netdata/pull/6247) ([thiagoftsm](https://github.com/thiagoftsm)) + ## [v1.16.0](https://github.com/netdata/netdata/tree/v1.16.0) (2019-07-08) **Fixed bugs:** @@ -774,7 +895,6 @@ - python.d/dockerd plugin update error [\#5200](https://github.com/netdata/netdata/issues/5200) - Netdata registry with basic auth \(behind nginx proxy\) results in error 409 [\#5180](https://github.com/netdata/netdata/issues/5180) - alarm-notify.sh: WARNING: Cannot find file [\#5136](https://github.com/netdata/netdata/issues/5136) -- Netdata w/ Docker Container not show Disk space utilization for mounts [\#5071](https://github.com/netdata/netdata/issues/5071) - zfs charts appear, even when they are zero [\#4115](https://github.com/netdata/netdata/issues/4115) - Ceph - No JSON object could be decoded [\#3563](https://github.com/netdata/netdata/issues/3563) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bfab928f..4d631f2fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -320,6 +320,7 @@ set(LIBNETDATA_FILES libnetdata/json/jsmn.h libnetdata/health/health.c libnetdata/health/health.h + libnetdata/string/utf8.h libnetdata/socket/security.c libnetdata/socket/security.h) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 0565644e8..1cb02caa5 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -28,8 +28,9 @@ The Contributor (*you*) hereby assigns netdata Inc. copyright in his contributions, to be licensed under the same terms as the rest of the code. > *Note: this means we may re-license netdata (your contributions included) -> any way we see fit, without asking your permission. -> Open-source licenses have significant differences and in our attempt to +> any way we see fit, without asking your permission. +> We intend to keep the netdata agent forever FOSS. 
+> But open-source licenses have significant differences and in our attempt to > help netdata grow we may have to distribute it under a different license. > For example, CNCF, the Cloud Native Computing Foundation, requires netdata > to be licensed under Apache-2.0 for it to be accepted as a member of the @@ -125,5 +126,6 @@ username|name|email (optional) @adherzog|Adam Herzog|adam@adamherzog.com @skrzyp1|Jerzy S.| @akwan|Alan Kwan| +@underhood|Timotej Šiškovič| [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md new file mode 100644 index 000000000..df18a6d4e --- /dev/null +++ b/DOCUMENTATION.md @@ -0,0 +1,53 @@ +# Netdata Documentation + +**Netdata is real-time health monitoring and performance troubleshooting for systems and applications.** It helps you instantly diagnose slowdowns and anomalies in your infrastructure with thousands of metrics, interactive visualizations, and insightful health alarms. + + +## Navigating the Netdata documentation + +Welcome! You've arrived at the documentation for Netdata. Use the links below to find answers to the most common questions about Netdata, such as how to install it, getting started guides, basic configuration, and adding more charts. Or, explore all of Netdata's documentation using the table of contents to your left. + +
+Quick links cover installation, the getting started guide, basic configuration, and adding more charts.
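+If you just want the one-minute version, the usual first steps after installation look roughly like the sketch below. It assumes a default install, with the agent's configuration under `/etc/netdata` and the dashboard on the stock port 19999; adjust the paths for your distribution.
+
+```bash
+# The dashboard is served by the agent itself, by default on port 19999:
+#   http://localhost:19999
+
+# Change settings with the bundled edit-config helper, which copies the
+# pristine file out of the stock configuration directory before editing:
+cd /etc/netdata
+sudo ./edit-config netdata.conf                # daemon and web server options
+sudo ./edit-config health_alarm_notify.conf    # alarm notification settings
+
+# Restart the agent to apply the changes (on systemd-based systems):
+sudo systemctl restart netdata
+```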
+ +**Advanced users**: For those who already understand how to access a Netdata dashboard and perform basic configuration, feel free to see what's behind any of these other doors. + + - [Netdata Behind Nginx](docs/Running-behind-nginx.md): Use an Nginx web server instead of Netdata's built-in server to enable TLS, HTTPS, and basic authentication. + - [Add More Charts](docs/Add-more-charts-to-netdata.md): Enable new internal or external plugins and understand when auto-detection works. + - [Performance](docs/Performance.md): Tips on running Netdata on devices with limited CPU and RAM resources, such as embedded devices, IoT, and edge devices. + - [Streaming](streaming/): Information for those who want to centralize Netdata metrics from any number of distributed agents. + - [Backends](backends/): Learn how to archive Netdata's real-time metrics to a time series database (like Prometheus) for long-term archiving. + + +Visit the [contributing](CONTRIBUTING.md) page to find guides about the Netdata code of conduct, our community, and how you can get started contributing to Netdata. + + +## Subscribe for news and tips from monitoring pros + + + + +--- + +![A GIF of the standard Netdata dashboard](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif) \ No newline at end of file diff --git a/Makefile.am b/Makefile.am index bc928bba8..de161c849 100644 --- a/Makefile.am +++ b/Makefile.am @@ -167,6 +167,7 @@ LIBNETDATA_FILES = \ libnetdata/json/jsmn.h \ libnetdata/health/health.c \ libnetdata/health/health.h \ + libnetdata/string/utf8.h \ $(NULL) APPS_PLUGIN_FILES = \ diff --git a/README.md b/README.md index c47847d94..eb62a5759 100644 --- a/README.md +++ b/README.md @@ -74,22 +74,24 @@ When you install multiple Netdata, they are integrated into **one distributed ap *in the last 24 hours:*
[![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) ## Quick Start +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) -You can quickly install Netdata on a Linux box (physical, virtual, container, IoT) with the following command: +To install Netdata from source on any Linux system (physical, virtual, container, IoT, edge) and keep it up to date with our **nightly releases** automatically, run the following: -```sh +```bash # make sure you run `bash` for your shell bash -# install Netdata, directly from github sources +# install Netdata directly from GitHub source bash <(curl -Ss https://my-netdata.io/kickstart.sh) ``` -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) + +To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](packaging/installer/README.md#nightly-vs-stable-releases). The above command will: -1. install any required packages on your system (it will ask you to confirm before doing so), -2. compile it, install it and start it +- Install any required packages on your system (it will ask you to confirm before doing so) +- Compile it, install it, and start it. More installation methods and additional options can be found at the [installation page](packaging/installer/#installation). @@ -98,6 +100,8 @@ To try Netdata in a docker container, run this: ``` docker run -d --name=netdata \ -p 19999:19999 \ + -v /etc/passwd:/host/etc/passwd:ro \ + -v /etc/group:/host/etc/group:ro \ -v /proc:/host/proc:ro \ -v /sys:/host/sys:ro \ -v /var/run/docker.sock:/var/run/docker.sock:ro \ @@ -146,6 +150,32 @@ not just visualize metrics. 
## News
 
+`Jul 9th, 2019` - **[Netdata v1.16.0 released!](https://github.com/netdata/netdata/releases)**
+
+Release v1.16.0 contains 40 bug fixes, 31 improvements, and 20 documentation updates.
+
+**Binary distributions.** To improve the security, speed, and reliability of new netdata installations, we are delivering our own industry-standard installation method: binary package distributions. The RPM binaries for the most common OSs are already available on packagecloud, and we'll have the DEB ones available very soon. All distributions are considered in Beta and, as always, we depend on our amazing community for feedback on improvements.
+
+ - Our stable distributions are at [netdata/netdata @ packagecloud.io](https://packagecloud.io/netdata/netdata)
+ - The nightly builds are at [netdata/netdata-edge @ packagecloud.io](https://packagecloud.io/netdata/netdata-edge)
+
+**Netdata now supports TLS encryption!** You can secure the communication to the [web server](https://docs.netdata.cloud/web/server/#enabling-tls-support), the [streaming connections from slaves to the master](https://docs.netdata.cloud/streaming/#securing-the-communication) and the connection to an [openTSDB backend](https://docs.netdata.cloud/backends/opentsdb/#https).
+
+**This version also brings two long-awaited features to netdata's health monitoring:**
+
+ - The [health management API](https://docs.netdata.cloud/web/api/health/#health-management-api) introduced in v1.12 allowed you to easily disable alarms and/or notifications while netdata was running. However, those changes were not persisted across netdata restarts. Since routine maintenance may involve completely restarting a monitoring node, netdata now saves these configurations to disk every time you issue a command to change the silencer settings. The new [LIST command](https://docs.netdata.cloud/web/api/health/#list-silencers) of the API allows you to view, at any time, which alarms are currently disabled or silenced (see the sketch after this entry).
+ - A way for netdata to [repeatedly send alarm notifications](https://docs.netdata.cloud/health/#alarm-line-repeat) for some, or all, active alarms, at a frequency of your choosing. As a result, you will no longer have to worry about missing a notification or forgetting about a raised alarm. The default is still to send only a single notification, so that existing users are not surprised by a different behavior.
+
+As always, we've introduced new collectors, 5 of them this time:
+
+ - Of special interest to people with Windows servers in their infrastructure is the [WMI collector](https://docs.netdata.cloud/collectors/go.d.plugin/modules/wmi/), though we are fully aware that we need to continue our efforts to do a proper port to Windows.
+ - The new `perf` plugin collects system-wide CPU performance statistics from Performance Monitoring Units (PMUs) using the `perf_event_open()` system call. You can read a wonderful article on why this is useful [here](http://www.brendangregg.com/blog/2017-05-09/cpu-utilization-is-wrong.html).
+ - The other three are collectors to monitor [Dnsmasq DHCP leases](https://docs.netdata.cloud/collectors/go.d.plugin/modules/dnsmasq_dhcp/), [Riak KV servers](https://docs.netdata.cloud/collectors/python.d.plugin/riakkv/) and [Pihole instances](https://docs.netdata.cloud/collectors/go.d.plugin/modules/pihole/).
+
+Finally, the DB Engine introduced in v1.15.0 now uses much less memory and is more robust than before.
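To make the health management API notes above concrete, here is a minimal shell sketch of silencing and inspecting alarms. It assumes a local agent on `localhost:19999` and the authorization token the agent stores on disk (typically `/var/lib/netdata/netdata.api.key`); the commands follow the linked health management API documentation, everything else is placeholder:

```bash
# Read the agent's API token (assumed default location).
TOKEN="$(cat /var/lib/netdata/netdata.api.key)"

# Silence notifications for all alarms; since v1.16 the silencers are
# persisted to disk, so they survive an agent restart.
curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE%20ALL" \
     -H "X-Auth-Token: ${TOKEN}"

# LIST (new in v1.16) shows the silencers currently in effect.
curl -s "http://localhost:19999/api/v1/manage/health?cmd=LIST" \
     -H "X-Auth-Token: ${TOKEN}"

# Remove all silencers again.
curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" \
     -H "X-Auth-Token: ${TOKEN}"
```

The repeat feature, by contrast, is configured per alarm in the health configuration files; a line such as `repeat: warning 1h critical 30m` (syntax per the alarm-line-repeat documentation linked above) re-sends notifications at those intervals while the alarm stays raised.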
+ +--- + `May 21st, 2019` - **[Netdata v1.15.0 released!](https://github.com/netdata/netdata/releases)** Release v1.15.0 contains 11 bug fixes and 30 improvements. @@ -268,7 +298,7 @@ The result is a highly efficient, low latency system, supporting multiple reader This is a high level overview of Netdata feature set and architecture. Click it to to interact with it (it has direct links to documentation). -[![image](https://user-images.githubusercontent.com/2662304/47672043-a47eb480-dbb9-11e8-92a4-fa422d053309.png)](https://my-netdata.io/infographic.html) +[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html) ## Features @@ -294,7 +324,7 @@ This is what you should expect from Netdata: - **Notifications**: [alerta.io](health/notifications/alerta/), [amazon sns](health/notifications/awssns/), [discordapp.com](health/notifications/discord/), [email](health/notifications/email/), [flock.com](health/notifications/flock/), [irc](health/notifications/irc/), [kavenegar.com](health/notifications/kavenegar/), [messagebird.com](health/notifications/messagebird/), [pagerduty.com](health/notifications/pagerduty/), [prowl](health/notifications/prowl/), [pushbullet.com](health/notifications/pushbullet/), [pushover.net](health/notifications/pushover/), [rocket.chat](health/notifications/rocketchat/), [slack.com](health/notifications/slack/), [smstools3](health/notifications/smstools3/), [syslog](health/notifications/syslog/), [telegram.org](health/notifications/telegram/), [twilio.com](health/notifications/twilio/), [web](health/notifications/web/) and [custom notifications](health/notifications/custom/). ### Integrations -- **time-series dbs** - can archive its metrics to `graphite`, `opentsdb`, `prometheus`, json document DBs, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). +- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). ## Visualization diff --git a/backends/README.md b/backends/README.md index bdc409017..ef5baa1b6 100644 --- a/backends/README.md +++ b/backends/README.md @@ -33,7 +33,9 @@ X seconds (though, it can send them per second if you need it to). - **prometheus** is described at [prometheus page](prometheus/) since it pulls data from netdata. - **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by - a lot of [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)) + **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**, + **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics**, + and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)) metrics are labeled in the format, which is used by Netdata for the [plaintext prometheus protocol](prometheus/). 
Notes on using the remote write backend are [here](prometheus/remote_write/).
 
diff --git a/backends/opentsdb/README.md b/backends/opentsdb/README.md
index 3d57e2e1a..ab1f08bd3 100644
--- a/backends/opentsdb/README.md
+++ b/backends/opentsdb/README.md
@@ -1,7 +1,6 @@
 # OpenTSDB with HTTP
 
-Since version 1.16 the Netdata has the feature to communicate with OpenTSDB using HTTP API. To enable this channel
-it is necessary to set the following options in your netdata.conf
+Netdata can easily communicate with OpenTSDB using its HTTP API. To enable this channel, set the following options in your `netdata.conf`:
 
 ```
 [backend]
@@ -9,13 +8,13 @@ it is necessary to set the following options in your netdata.conf
     destination = localhost:4242
 ```
 
-, in this example we are considering that OpenTSDB is running with its default port (4242).
+In this example, OpenTSDB is running with its default port, which is `4242`. If you run OpenTSDB on a different port, change the `destination = localhost:4242` line accordingly.
 
 ## HTTPS
 
-Netdata also supports sending the metrics using SSL/TLS, but OpenTDSB does not have support to safety connections,
-so it will be necessary to configure a reverse-proxy to enable the HTTPS communication. After to configure your proxy the
-following changes must be done in the netdata.conf:
+As of [v1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0), Netdata can send metrics to OpenTSDB using TLS/SSL. Unfortunately, OpenTSDB does not support encrypted connections, so you will have to configure a reverse proxy to enable HTTPS communication between Netdata and OpenTSDB. You can set up a reverse proxy with [Nginx](../../docs/Running-behind-nginx.md).
+
+After your proxy is configured, make the following changes to `netdata.conf`:
 
 ```
 [backend]
@@ -23,4 +22,4 @@ following changes must be done in the netdata.conf:
     destination = localhost:8082
 ```
 
-In this example we used the port 8082 for our reverse proxy.
+In this example, we used the port `8082` for our reverse proxy. If your reverse proxy listens on a different port, change the `destination = localhost:8082` line accordingly.
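Since the text above defers TLS termination to a reverse proxy, a minimal Nginx sketch may help. Everything here is illustrative: the hostname and certificate paths are placeholders, and the ports match the example configuration above (`8082` facing Netdata, `4242` for OpenTSDB itself):

```
# Minimal sketch: terminate TLS on 8082 and forward to OpenTSDB on 4242.
server {
    listen 8082 ssl;
    server_name opentsdb.example.com;                 # placeholder

    ssl_certificate     /etc/nginx/ssl/opentsdb.crt;  # placeholder
    ssl_certificate_key /etc/nginx/ssl/opentsdb.key;  # placeholder

    location / {
        # Pass the decrypted requests to OpenTSDB's plain-HTTP listener.
        proxy_pass http://127.0.0.1:4242;
    }
}
```

With something like this in place, Netdata's `destination = localhost:8082` reaches OpenTSDB over an encrypted first hop.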
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index 4f2b2f8f7..a757a5bdd 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -191,6 +191,12 @@ double utime_fix_ratio = 1.0, cminflt_fix_ratio = 1.0, cmajflt_fix_ratio = 1.0; + +struct pid_on_target { + int32_t pid; + struct pid_on_target *next; +}; + // ---------------------------------------------------------------------------- // target // @@ -262,6 +268,8 @@ struct target { int starts_with; // if set, the compare string matches only the // beginning of the command + struct pid_on_target *root_pid; // list of aggregated pids for target debugging + struct target *target; // the one that will be reported to netdata struct target *next; }; @@ -491,6 +499,187 @@ static int all_files_len = 0, all_files_size = 0; +// ---------------------------------------------------------------------------- +// read users and groups from files + +struct user_or_group_id { + avl avl; + + union { + uid_t uid; + gid_t gid; + } id; + + char *name; + + int updated; + + struct user_or_group_id * next; +}; + +enum user_or_group_id_type { + USER_ID, + GROUP_ID +}; + +struct user_or_group_ids{ + enum user_or_group_id_type type; + + avl_tree index; + struct user_or_group_id *root; + + char filename[FILENAME_MAX + 1]; +}; + +int user_id_compare(void* a, void* b) { + if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid) + return -1; + + else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid) + return 1; + + else + return 0; +} + +struct user_or_group_ids all_user_ids = { + .type = USER_ID, + + .index = { + NULL, + user_id_compare + }, + + .root = NULL, + + .filename = "", +}; + +int group_id_compare(void* a, void* b) { + if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid) + return -1; + + else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid) + return 1; + + else + return 0; +} + +struct user_or_group_ids all_group_ids = { + .type = GROUP_ID, + + .index = { + NULL, + group_id_compare + }, + + .root = NULL, + + .filename = "", +}; + +int file_changed(const struct stat *statbuf, struct timespec *last_modification_time) { + if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec && + statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0; + + last_modification_time->tv_sec = statbuf->st_mtim.tv_sec; + last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec; + + return 1; +} + +int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) { + struct stat statbuf; + if(unlikely(stat(ids->filename, &statbuf))) + return 1; + else + if(likely(!file_changed(&statbuf, last_modification_time))) return 0; + + procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 1; + + size_t line, lines = procfile_lines(ff); + + for(line = 0; line < lines ;line++) { + size_t words = procfile_linewords(ff, line); + if(unlikely(words < 3)) continue; + + char *name = procfile_lineword(ff, line, 0); + if(unlikely(!name || !*name)) continue; + + char *id_string = procfile_lineword(ff, line, 2); + if(unlikely(!id_string || !*id_string)) continue; + + + struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id)); + + if(ids->type == USER_ID) + user_or_group_id->id.uid = 
(uid_t)str2ull(id_string); + else + user_or_group_id->id.gid = (uid_t)str2ull(id_string); + + user_or_group_id->name = strdupz(name); + user_or_group_id->updated = 1; + + struct user_or_group_id *existing_user_id = NULL; + + if(likely(ids->root)) + existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl *) user_or_group_id); + + if(unlikely(existing_user_id)) { + freez(existing_user_id->name); + existing_user_id->name = user_or_group_id->name; + existing_user_id->updated = 1; + freez(user_or_group_id); + } + else { + if(unlikely(avl_insert(&ids->index, (avl *) user_or_group_id) != (void *) user_or_group_id)) { + error("INTERNAL ERROR: duplicate indexing of id during realloc"); + }; + + user_or_group_id->next = ids->root; + ids->root = user_or_group_id; + } + } + + procfile_close(ff); + + // remove unused ids + struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL; + + while(user_or_group_id) { + if(unlikely(!user_or_group_id->updated)) { + if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl *) user_or_group_id) != user_or_group_id)) + error("INTERNAL ERROR: removal of unused id from index, removed a different id"); + + if(prev_user_id) + prev_user_id->next = user_or_group_id->next; + else + ids->root = user_or_group_id->next; + + freez(user_or_group_id->name); + freez(user_or_group_id); + + if(prev_user_id) + user_or_group_id = prev_user_id->next; + else + user_or_group_id = ids->root; + } + else { + user_or_group_id->updated = 0; + + prev_user_id = user_or_group_id; + user_or_group_id = user_or_group_id->next; + } + } + + return 0; +} + // ---------------------------------------------------------------------------- // apps_groups.conf // aggregate all processes in groups, to have a limited number of dimensions @@ -508,11 +697,27 @@ static struct target *get_users_target(uid_t uid) { snprintfz(w->id, MAX_NAME, "%u", uid); w->idhash = simple_hash(w->id); - struct passwd *pw = getpwuid(uid); - if(!pw || !pw->pw_name || !*pw->pw_name) - snprintfz(w->name, MAX_NAME, "%u", uid); - else - snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + struct user_or_group_id user_id_to_find, *user_or_group_id = NULL; + user_id_to_find.id.uid = uid; + + if(*netdata_configured_host_prefix) { + static struct timespec last_passwd_modification_time; + int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time); + + if(likely(!ret && all_user_ids.index.root)) + user_or_group_id = (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl *) &user_id_to_find); + } + + if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name) { + snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name); + } + else { + struct passwd *pw = getpwuid(uid); + if(!pw || !pw->pw_name || !*pw->pw_name) + snprintfz(w->name, MAX_NAME, "%u", uid); + else + snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + } netdata_fix_chart_name(w->name); @@ -540,11 +745,27 @@ struct target *get_groups_target(gid_t gid) snprintfz(w->id, MAX_NAME, "%u", gid); w->idhash = simple_hash(w->id); - struct group *gr = getgrgid(gid); - if(!gr || !gr->gr_name || !*gr->gr_name) - snprintfz(w->name, MAX_NAME, "%u", gid); - else - snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); + struct user_or_group_id group_id_to_find, *group_id = NULL; + group_id_to_find.id.gid = gid; + + if(*netdata_configured_host_prefix) { + static struct timespec last_group_modification_time; + int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time); + + if(likely(!ret && 
all_group_ids.index.root)) + group_id = (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl *) &group_id_to_find); + } + + if(group_id && group_id->name && *group_id->name) { + snprintfz(w->name, MAX_NAME, "%s", group_id->name); + } + else { + struct group *gr = getgrgid(gid); + if(!gr || !gr->gr_name || !*gr->gr_name) + snprintfz(w->name, MAX_NAME, "%u", gid); + else + snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); + } netdata_fix_chart_name(w->name); @@ -2006,7 +2227,7 @@ static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t tim return indent + 1; } -static inline void debug_print_process_tree(struct pid_stat *p, char *msg) { +static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) { debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited"); debug_print_process_and_parents(p, p->stat_collected_usec); } @@ -2657,6 +2878,18 @@ static size_t zero_all_targets(struct target *root) { w->openeventpolls = 0; w->openother = 0; } + + if(unlikely(w->root_pid)) { + struct pid_on_target *pid_on_target_to_free, *pid_on_target = w->root_pid; + + while(pid_on_target) { + pid_on_target_to_free = pid_on_target; + pid_on_target = pid_on_target->next; + free(pid_on_target_to_free); + } + + w->root_pid = NULL; + } } return count; @@ -2799,8 +3032,14 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, w->processes++; w->num_threads += p->num_threads; - if(unlikely(debug_enabled || w->debug_enabled)) + if(unlikely(debug_enabled || w->debug_enabled)) { debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt); + + struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target)); + pid_on_target->pid = p->pid; + pid_on_target->next = w->root_pid; + w->root_pid = pid_on_target; + } } static void calculate_netdata_statistics(void) { @@ -3321,6 +3560,18 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type for(w = root ; w ; w = w->next) { if (w->target) continue; + if(unlikely(w->processes && (debug_enabled || w->debug_enabled))) { + struct pid_on_target *pid_on_target; + + fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes, (w->processes == 1)?"":"es"); + + for(pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) { + fprintf(stderr, " %d", pid_on_target->pid); + } + + fputc('\n', stderr); + } + if (!w->exposed && w->processes) { newly_added++; w->exposed = 1; @@ -3788,6 +4039,12 @@ int main(int argc, char **argv) { info("started on pid %d", getpid()); + snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix); + debug_log("passwd file: '%s'", all_user_ids.filename); + + snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix); + debug_log("group file: '%s'", all_group_ids.filename); + #if (ALL_PIDS_ARE_READ_INSTANTLY == 0) all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max); #endif diff --git a/collectors/cgroups.plugin/README.md 
b/collectors/cgroups.plugin/README.md index c01f9ec04..6ec9024da 100644 --- a/collectors/cgroups.plugin/README.md +++ b/collectors/cgroups.plugin/README.md @@ -110,6 +110,8 @@ By default, Netdata will enable monitoring metrics only when they are not zero. enable memory (used mem including cache) = yes ``` +You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. + ### alarms CPU and memory limits are watched and used to rise alarms. Memory usage for every cgroup is checked against `ram` and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alarms is available in `health.d/cgroups.conf` file. diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in index 48f523885..784c06042 100755 --- a/collectors/cgroups.plugin/cgroup-name.sh.in +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -53,18 +53,25 @@ function docker_get_name_classic() { } function docker_get_name_api() { - local id="${1}" - if [ ! -S "${DOCKER_HOST}" ]; then - warning "Can't find ${DOCKER_HOST}" + local path="/containers/${1}/json" + if [ -z "${DOCKER_HOST}" ]; then + warning "No DOCKER_HOST is set" return 1 fi if ! command -v jq >/dev/null 2>&1; then warning "Can't find jq command line tool. jq is required for netdata to retrieve docker container name using ${DOCKER_HOST} API, falling back to docker ps" return 1 fi - - info "Running API command: /containers/${id}/json" - JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\\r\\n" | nc -U "${DOCKER_HOST}" | grep '^{.*') + if [ -S "${DOCKER_HOST}" ]; then + info "Running API command: curl --unix-socket ${DOCKER_HOST} http://localhost${path}" + JSON=$(curl -sS --unix-socket "${DOCKER_HOST}" "http://localhost${path}") + elif [ "${DOCKER_HOST}" == "/var/run/docker.sock" ]; then + warning "Docker socket was not found at ${DOCKER_HOST}" + return 1 + else + info "Running API command: curl ${DOCKER_HOST}${path}" + JSON=$(curl -sS "${DOCKER_HOST}${path}") + fi NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') return 0 } diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c index 87cb5ef12..d4f990061 100644 --- a/collectors/cgroups.plugin/cgroup-network.c +++ b/collectors/cgroups.plugin/cgroup-network.c @@ -86,7 +86,7 @@ unsigned int read_iface_ifindex(const char *prefix, const char *iface) { return (unsigned int)ifindex; } -struct iface *read_proc_net_dev(const char *scope, const char *prefix) { +struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *prefix) { if(!prefix) prefix = ""; procfile *ff = NULL; diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 4300788d5..d9d130f7e 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -575,7 +575,8 @@ static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) { cp->updated = 1; - if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system))) + if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && + (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) cp->enabled = CONFIG_BOOLEAN_YES; } } @@ -611,7 +612,8 @@ static inline void cgroup2_read_cpuacct_stat(struct cpuacct_stat *cp) { cp->updated = 1; - 
if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system))) + if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && + (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) cp->enabled = CONFIG_BOOLEAN_YES; } } @@ -668,7 +670,8 @@ static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) { ca->updated = 1; - if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total)) + if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && + (total || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) ca->enabled = CONFIG_BOOLEAN_YES; } } @@ -737,7 +740,7 @@ static inline void cgroup_read_blkio(struct blkio *io) { io->updated = 1; if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(io->Read || io->Write)) + if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) io->enabled = CONFIG_BOOLEAN_YES; else io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; @@ -787,7 +790,7 @@ static inline void cgroup2_read_blkio(struct blkio *io, unsigned int word_offset io->updated = 1; if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(io->Read || io->Write)) + if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) io->enabled = CONFIG_BOOLEAN_YES; else io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; @@ -881,7 +884,8 @@ static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unif if(( (!parent_cg_is_unified) && ( mem->total_cache || mem->total_dirty || mem->total_rss || mem->total_rss_huge || mem->total_mapped_file || mem->total_writeback || mem->total_swap || mem->total_pgpgin || mem->total_pgpgout || mem->total_pgfault || mem->total_pgmajfault)) || (parent_cg_is_unified && ( mem->anon || mem->total_dirty || mem->kernel_stack || mem->slab || mem->sock || mem->total_writeback - || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault))) + || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault)) + || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES) mem->enabled_detailed = CONFIG_BOOLEAN_YES; else mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations; @@ -893,14 +897,16 @@ memory_next: // read usage_in_bytes if(likely(mem->filename_usage_in_bytes)) { mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes); - if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes)) + if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && + (mem->usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES; } // read msw_usage_in_bytes if(likely(mem->filename_msw_usage_in_bytes)) { mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes); - if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes)) + if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && + (mem->msw_usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES; } @@ -913,10 +919,10 @@ memory_next: else { mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt); if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) { - 
if(unlikely(!mem->failcnt)) - mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations; - else + if(unlikely(mem->failcnt || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) mem->enabled_failcnt = CONFIG_BOOLEAN_YES; + else + mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations; } } } diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md index 8f859e350..ff98a8744 100644 --- a/collectors/diskspace.plugin/README.md +++ b/collectors/diskspace.plugin/README.md @@ -1,6 +1,6 @@ # diskspace.plugin -This plugin monitors the disk space usage of mounted disks, under Linux. +This plugin monitors the disk space usage of mounted disks, under Linux. The plugin requires Netdata to have execute/search permissions on the mount point itself, as well as each component of the absolute path to the mount point. Two charts are available for every mount: - Disk Space Usage @@ -10,7 +10,7 @@ Two charts are available for every mount: Simple patterns can be used to exclude mounts from showed statistics based on path or filesystem. By default read-only mounts are not displayed. To display them `yes` should be set for a chart instead of `auto`. -By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
``` diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c index 77b87b093..eab607d84 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.c +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -249,7 +249,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { int rendered = 0; - if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (bavail || breserved_root || bused))) { + if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && + (bavail || breserved_root || bused || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!m->st_space)) { m->do_space = CONFIG_BOOLEAN_YES; m->st_space = rrdset_find_bytype_localhost("disk_space", disk); @@ -289,7 +291,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { // -------------------------------------------------------------------------- - if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (favail || freserved_root || fused))) { + if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && + (favail || freserved_root || fused || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!m->st_inodes)) { m->do_inodes = CONFIG_BOOLEAN_YES; m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk); diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md index 237e60921..618e053d2 100644 --- a/collectors/freebsd.plugin/README.md +++ b/collectors/freebsd.plugin/README.md @@ -2,4 +2,6 @@ Collects resource usage and performance data on FreeBSD systems +By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
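Several READMEs touched by this patch mention the new `enable zero metrics` switch, so here is the corresponding `netdata.conf` fragment for reference (a minimal sketch; per the text above, the option belongs to the `[global]` section):

```
[global]
    # show charts for internal plugins even while all of their metrics are zero
    enable zero metrics = yes
```

This is the user-facing switch behind the `netdata_zero_metrics_enabled` checks that the collector hunks in this patch add to their `CONFIG_BOOLEAN_AUTO` conditions.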
+ [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c index 81a301e4a..910def599 100644 --- a/collectors/freebsd.plugin/freebsd_devstat.c +++ b/collectors/freebsd.plugin/freebsd_devstat.c @@ -352,7 +352,8 @@ int do_kern_devstat(int update_every, usec_t dt) { if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO && (dstat[i].bytes[DEVSTAT_READ] || dstat[i].bytes[DEVSTAT_WRITE] || - dstat[i].bytes[DEVSTAT_FREE]))) { + dstat[i].bytes[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_io)) { dm->st_io = rrdset_create_localhost("disk", disk, @@ -389,7 +390,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_ops)) { dm->st_ops = rrdset_create_localhost("disk_ops", disk, @@ -428,7 +430,9 @@ int do_kern_devstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO && - (dstat[i].start_count || dstat[i].end_count))) { + (dstat[i].start_count || + dstat[i].end_count || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_qops)) { dm->st_qops = rrdset_create_localhost("disk_qops", disk, @@ -457,7 +461,8 @@ int do_kern_devstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO && - cur_dstat.busy_time_ms)) { + (cur_dstat.busy_time_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_util)) { dm->st_util = rrdset_create_localhost("disk_util", disk, @@ -490,7 +495,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (cur_dstat.duration_read_ms || cur_dstat.duration_write_ms || cur_dstat.duration_other_ms || - cur_dstat.duration_free_ms))) { + cur_dstat.duration_free_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_iotime)) { dm->st_iotime = rrdset_create_localhost("disk_iotime", disk, @@ -538,7 +544,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_await)) { dm->st_await = rrdset_create_localhost("disk_await", disk, @@ -603,7 +610,8 @@ int do_kern_devstat(int update_every, usec_t dt) { if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO && (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_avagsz)) { dm->st_avagsz = rrdset_create_localhost("disk_avgsz", disk, @@ -660,7 +668,8 @@ int do_kern_devstat(int 
update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_svctm)) { dm->st_svctm = rrdset_create_localhost("disk_svctm", disk, diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c index ac1638ee7..7e2293e43 100644 --- a/collectors/freebsd.plugin/freebsd_getifaddrs.c +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -440,7 +440,9 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ibytes) || IFA_DATA(obytes)))) { + (IFA_DATA(ibytes) || + IFA_DATA(obytes) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_bandwidth)) { ifm->st_bandwidth = rrdset_create_localhost("net", ifa->ifa_name, @@ -469,7 +471,11 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) { + (IFA_DATA(ipackets) || + IFA_DATA(opackets) || + IFA_DATA(imcasts) || + IFA_DATA(omcasts) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_packets)) { ifm->st_packets = rrdset_create_localhost("net_packets", ifa->ifa_name, @@ -508,7 +514,9 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) { + (IFA_DATA(ierrors) || + IFA_DATA(oerrors) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_errors)) { ifm->st_errors = rrdset_create_localhost("net_errors", ifa->ifa_name, @@ -538,11 +546,11 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(iqdrops) + (IFA_DATA(iqdrops) || #if __FreeBSD__ >= 11 - || IFA_DATA(oqdrops) -#endif - ))) { + IFA_DATA(oqdrops) || + #endif + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_drops)) { ifm->st_drops = rrdset_create_localhost("net_drops", ifa->ifa_name, @@ -577,7 +585,8 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO && - IFA_DATA(collisions))) { + (IFA_DATA(collisions) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_events)) { ifm->st_events = rrdset_create_localhost("net_events", ifa->ifa_name, diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c index d050c6270..58b67a3c3 100644 --- a/collectors/freebsd.plugin/freebsd_getmntinfo.c +++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c @@ -216,7 +216,9 @@ int do_getmntinfo(int update_every, usec_t dt) { int rendered = 0; - if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && 
(mntbuf[i].f_blocks > 2))) { + if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && + (mntbuf[i].f_blocks > 2 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_space)) { snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); @@ -255,7 +257,9 @@ int do_getmntinfo(int update_every, usec_t dt) { // -------------------------------------------------------------------------- - if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) { + if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && + (mntbuf[i].f_files > 1 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_inodes)) { snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c index b56fdc079..402813fe0 100644 --- a/collectors/freebsd.plugin/freebsd_sysctl.c +++ b/collectors/freebsd.plugin/freebsd_sysctl.c @@ -1941,7 +1941,13 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop || tcpstat.tcps_finwait2_drops))) { + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvpackafterwin || + tcpstat.tcps_rcvafterclose || + tcpstat.tcps_rcvmemdrop || + tcpstat.tcps_persistdrop || + tcpstat.tcps_finwait2_drops || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -1982,7 +1988,9 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvoopack || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2014,7 +2022,11 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_sc_sendcookie || + tcpstat.tcps_sc_recvcookie || + tcpstat.tcps_sc_zonefail || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syncookies = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2050,7 +2062,9 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) { + if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_listendrop || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { 
do_tcpext_listen = CONFIG_BOOLEAN_YES; static RRDSET *st_listen = NULL; @@ -2085,7 +2099,11 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_ce || tcpstat.tcps_ecn_ect0 || tcpstat.tcps_ecn_ect1))) { + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_ecn_ce || + tcpstat.tcps_ecn_ect0 || + tcpstat.tcps_ecn_ect1 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2626,8 +2644,11 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + (ip6stat.ip6s_localout || + ip6stat.ip6s_total || + ip6stat.ip6s_forward || + ip6stat.ip6s_delivered || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2666,8 +2687,10 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { + (ip6stat.ip6s_fragmented || + ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2706,8 +2729,11 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + (ip6stat.ip6s_reassembled || + ip6stat.ip6s_fragdropped || + ip6stat.ip6s_fragtimeout || + ip6stat.ip6s_fragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2747,16 +2773,17 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2872,7 +2899,10 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || 
icmp6_total.msgs_out))) { + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && + (icmp6_total.msgs_in || + icmp6_total.msgs_out || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6 = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2907,7 +2937,10 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_REDIRECT] || + icmp6stat.icp6s_outhist[ND_REDIRECT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_redir = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2941,18 +2974,19 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3005,11 +3039,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_echos = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3047,11 +3082,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == 
CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_router = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3090,11 +3126,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_neighbor = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3133,17 +3170,18 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[136]))) { + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md index 3e2554e47..fcb4670ea 100644 --- a/collectors/macos.plugin/README.md +++ b/collectors/macos.plugin/README.md @@ -2,4 +2,6 @@ Collects resource usage and performance data on MacOS systems +By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
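The collector hunks above and below all apply the same mechanical change, so the idiom is worth spelling out once. This condensed, self-contained C sketch (placeholder counter names and values, not code from the patch) shows how a chart left at `auto` is promoted to enabled either when any of its source counters is non-zero or when the user has enabled zero metrics:

```c
// Condensed sketch of the recurring pattern in these hunks.
// The macro values mirror netdata's libnetdata config constants.
#include <stdio.h>

#define CONFIG_BOOLEAN_NO   0
#define CONFIG_BOOLEAN_YES  1
#define CONFIG_BOOLEAN_AUTO 2

// Set from "[global] enable zero metrics" in netdata.conf.
static int netdata_zero_metrics_enabled = CONFIG_BOOLEAN_NO;

int main(void) {
    int do_chart = CONFIG_BOOLEAN_AUTO;              // option left at "auto"
    unsigned long long counter_a = 0, counter_b = 0; // placeholder collected values

    if (do_chart == CONFIG_BOOLEAN_YES ||
        (do_chart == CONFIG_BOOLEAN_AUTO &&
         (counter_a || counter_b ||
          netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        do_chart = CONFIG_BOOLEAN_YES; // latch: the chart stays enabled from now on
        // ... in the collectors: create the RRDSET on first use, update dimensions ...
        printf("chart enabled\n");
    }
    else
        printf("chart suppressed (all metrics zero, zero metrics not enabled)\n");

    return 0;
}
```

Latching to `CONFIG_BOOLEAN_YES` on the first match is what makes a chart persist once data has been seen, and the added `netdata_zero_metrics_enabled` term is what lets `enable zero metrics = yes` bypass the non-zero requirement.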
+ [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c index a8af72e69..dddafc9f5 100644 --- a/collectors/macos.plugin/macos_sysctl.c +++ b/collectors/macos.plugin/macos_sysctl.c @@ -479,7 +479,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop))) { + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvpackafterwin || + tcpstat.tcps_rcvafterclose || + tcpstat.tcps_rcvmemdrop || + tcpstat.tcps_persistdrop || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpconnaborts"); if (unlikely(!st)) { @@ -514,7 +519,9 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvoopack || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpofo"); if (unlikely(!st)) { @@ -543,7 +550,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_sc_sendcookie || + tcpstat.tcps_sc_recvcookie || + tcpstat.tcps_sc_zonefail || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syscookies = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpsyncookies"); @@ -579,7 +590,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100) - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_recv_ce || tcpstat.tcps_ecn_not_supported))) { + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_ecn_recv_ce || + tcpstat.tcps_ecn_not_supported || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.ecnpkts"); if (unlikely(!st)) { @@ -980,8 +994,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { error("DISABLED: ipv6.errors"); } else { if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + (ip6stat.ip6s_localout || + ip6stat.ip6s_total || + ip6stat.ip6s_forward || + ip6stat.ip6s_delivered || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_packets = 
CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.packets"); if (unlikely(!st)) { @@ -1017,8 +1034,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { + (ip6stat.ip6s_fragmented || + ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsout = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.fragsout"); if (unlikely(!st)) { @@ -1053,8 +1072,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + (ip6stat.ip6s_reassembled || + ip6stat.ip6s_fragdropped || + ip6stat.ip6s_fragtimeout || + ip6stat.ip6s_fragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsin = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.fragsin"); if (unlikely(!st)) { @@ -1090,16 +1112,17 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_errors = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.errors"); if (unlikely(!st)) { @@ -1158,7 +1181,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; } icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && + (icmp6_total.msgs_in || + icmp6_total.msgs_out || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6 = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmp"); if (unlikely(!st)) { @@ -1189,7 +1215,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_REDIRECT] || + icmp6stat.icp6s_outhist[ND_REDIRECT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_redir = CONFIG_BOOLEAN_YES; st = 
rrdset_find_localhost("ipv6.icmpredir"); if (unlikely(!st)) { @@ -1220,18 +1249,19 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_errors = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmperrors"); if (unlikely(!st)) { @@ -1279,11 +1309,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_echos = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmpechos"); if (unlikely(!st)) { @@ -1318,11 +1349,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_router = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmprouter"); if (unlikely(!st)) { @@ -1357,11 +1389,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - 
icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_neighbor = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmpneighbor"); if (unlikely(!st)) { @@ -1396,17 +1429,18 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[136]))) { + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_types = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmptypes"); if (unlikely(!st)) { diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c index 66ec5d0ea..aea2704a3 100644 --- a/collectors/plugins.d/plugins_d.c +++ b/collectors/plugins.d/plugins_d.c @@ -117,6 +117,103 @@ inline int pluginsd_split_words(char *str, char **words, int max_words) { return quoted_strings_splitter(str, words, max_words, pluginsd_space); } +#ifdef ENABLE_HTTPS +/** + * Update Buffer + * + * Update the temporary buffer used to parse data received from the slave + * + * @param output is a pointer to the buffer where the data will be stored + * @param ssl is a pointer to the SSL connection with the server + * + * @return it returns the total number of bytes read on success and zero or a negative number otherwise + */ +int pluginsd_update_buffer(char *output, SSL *ssl) { + ERR_clear_error(); + int bytesleft = SSL_read(ssl, output, PLUGINSD_LINE_MAX_SSL_READ); + if(bytesleft <= 0) { + int sslerrno = SSL_get_error(ssl, bytesleft); + switch(sslerrno) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_WRITE: + { + break; + } + default: + { + u_long err; + char buf[256]; + int counter = 0; + while ((err = ERR_get_error()) != 0) { + ERR_error_string_n(err, buf, sizeof(buf)); + info("%d SSL read error (%s) on socket %d ", counter++, ERR_error_string((long)SSL_get_error(ssl, bytesleft), NULL), SSL_get_fd(ssl)); + } + } + + } + } else { + output[bytesleft] = '\0'; + } + + return bytesleft; +} + +/** + * Get from Buffer + * + * Get the next line of data to process from the buffer + * + * @param output is the output buffer that will receive the next line to parse. + * @param bytesread the number of bytes read in the previous iteration. + * @param input the input buffer holding the data to process + * @param ssl a pointer to the SSL connection with the server + * @param src the start address of the input buffer, used to rewind the read position when the buffer must be refilled.
+ * + * @return it returns a pointer to continue reading from on success and NULL otherwise. + */ +char * pluginsd_get_from_buffer(char *output, int *bytesread, char *input, SSL *ssl, char *src) { + int copying = 1; + char *endbuffer; + size_t length; + while(copying) { + if(*bytesread > 0) { + endbuffer = strchr(input, '\n'); + if(endbuffer) { + copying = 0; + endbuffer++; // advance past the newline, so it is copied as well + length = endbuffer - input; + *bytesread -= length; + + memcpy(output, input, length); + output += length; + *output = '\0'; + input += length; + } else { + length = strlen(input); + memcpy(output, input, length); + output += length; + input = src; + + *bytesread = pluginsd_update_buffer(input, ssl); + if(*bytesread <= 0) { + input = NULL; + copying = 0; + } + } + } else { + // the buffer is exhausted, refill it from the SSL connection + *bytesread = pluginsd_update_buffer(input, ssl); + if(*bytesread <= 0) { + input = NULL; + copying = 0; + } + } + } + + return input; +} +#endif + inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) { int enabled = cd->enabled; @@ -149,10 +246,43 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int goto cleanup; } +#ifdef ENABLE_HTTPS + int bytesleft = 0; + char tmpbuffer[PLUGINSD_LINE_MAX]; + char *readfrom; +#endif + char *r = NULL; while(!ferror(fp)) { if(unlikely(netdata_exit)) break; - char *r = fgets(line, PLUGINSD_LINE_MAX, fp); +#ifdef ENABLE_HTTPS + int normalread = 1; + if(netdata_srv_ctx) { + if(host->ssl.conn && !host->ssl.flags) { + if(!bytesleft) { + r = line; + readfrom = tmpbuffer; + bytesleft = pluginsd_update_buffer(readfrom, host->ssl.conn); + if(bytesleft <= 0) { + break; + } + } + + readfrom = pluginsd_get_from_buffer(line, &bytesleft, readfrom, host->ssl.conn, tmpbuffer); + if(!readfrom) { + r = NULL; + } + + normalread = 0; + } + } + + if(normalread) { + r = fgets(line, PLUGINSD_LINE_MAX, fp); + } +#else + r = fgets(line, PLUGINSD_LINE_MAX, fp); +#endif if(unlikely(!r)) { if(feof(fp)) error("read failed: end of file"); @@ -526,6 +656,70 @@ static void pluginsd_worker_thread_cleanup(void *arg) { } } +#define SERIAL_FAILURES_THRESHOLD 10 +static void pluginsd_worker_thread_handle_success(struct plugind *cd) { + if (likely(cd->successful_collections)) { + sleep((unsigned int) cd->update_every); + return; + } + + if(likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) { + info("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", + cd->fullfilename, cd->pid, + cd->enabled ? + "Waiting a bit before starting it again." : + "Will not start it again - it is now disabled."); + sleep((unsigned int) (cd->update_every * 10)); + return; + } + + if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0). " + "We have tried to collect something %zu times - unsuccessfully. Disabling it.", + cd->fullfilename, cd->pid, cd->serial_failures); + cd->enabled = 0; + return; + } + + return; +} + +static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) { + if (worker_ret_code == -1) { + info("'%s' (pid %d) was killed with SIGTERM. Disabling it.", cd->fullfilename, cd->pid); + cd->enabled = 0; + return; + } + + if (!cd->successful_collections) { + error("'%s' (pid %d) exited with error code %d and has not collected any data. 
Disabling it.", + cd->fullfilename, cd->pid, worker_ret_code); + cd->enabled = 0; + return; + } + + if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", + cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, + cd->enabled ? + "Waiting a bit before starting it again." : + "Will not start it again - it is disabled."); + sleep((unsigned int) (cd->update_every * 10)); + return; + } + + if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)." + "We tried to restart it %zu times, but it failed to generate data. Disabling it.", + cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, cd->serial_failures); + cd->enabled = 0; + return; + } + + return; +} +#undef SERIAL_FAILURES_THRESHOLD + void *pluginsd_worker_thread(void *arg) { netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg); @@ -546,50 +740,14 @@ void *pluginsd_worker_thread(void *arg) { error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count); killpid(cd->pid, SIGTERM); - // get the return code - int code = mypclose(fp, cd->pid); + int worker_ret_code = mypclose(fp, cd->pid); - if(code != 0) { - // the plugin reports failure + if (likely(worker_ret_code == 0)) + pluginsd_worker_thread_handle_success(cd); + else + pluginsd_worker_thread_handle_error(cd, worker_ret_code); - if(likely(!cd->successful_collections)) { - // nothing collected - disable it - error("'%s' (pid %d) exited with error code %d. Disabling it.", cd->fullfilename, cd->pid, code); - cd->enabled = 0; - } - else { - // we have collected something - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). We tried %zu times to restart it, but it failed to generate data. Disabling it.", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->serial_failures); - cd->enabled = 0; - } - } - } - else { - // the plugin reports success - - if(unlikely(!cd->successful_collections)) { - // we have collected nothing so far - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", cd->fullfilename, cd->pid, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is now disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0), but we have tried %zu times to collect something. 
Disabling it.", cd->fullfilename, cd->pid, cd->serial_failures); - cd->enabled = 0; - } - } - else - sleep((unsigned int) cd->update_every); - } cd->pid = 0; - if(unlikely(!cd->enabled)) break; } diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h index 04d5de3d3..7d5c7dda4 100644 --- a/collectors/plugins.d/plugins_d.h +++ b/collectors/plugins.d/plugins_d.h @@ -31,6 +31,7 @@ #define PLUGINSD_KEYWORD_VARIABLE "VARIABLE" #define PLUGINSD_LINE_MAX 1024 +#define PLUGINSD_LINE_MAX_SSL_READ 512 #define PLUGINSD_MAX_WORDS 20 #define PLUGINSD_MAX_DIRECTORIES 20 diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md index cacde84f6..9513877d3 100644 --- a/collectors/proc.plugin/README.md +++ b/collectors/proc.plugin/README.md @@ -75,7 +75,7 @@ netdata will automatically set the name of disks on the dashboard, from the moun ### performance metrics -By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
netdata categorizes all block devices in 3 categories: diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c index cd467948c..eee0cbe7f 100644 --- a/collectors/proc.plugin/proc_diskstats.c +++ b/collectors/proc.plugin/proc_diskstats.c @@ -973,7 +973,9 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------------- // Do performance metrics - if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) { + if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && + (readsectors || writesectors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_io = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_io)) { @@ -1004,7 +1006,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes))) { + if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_ops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_ops)) { @@ -1037,7 +1040,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && queued_ios)) { + if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && + (queued_ios || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_qops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_qops)) { @@ -1068,7 +1072,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && backlog_ms)) { + if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && + (backlog_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_backlog = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_backlog)) { @@ -1099,7 +1104,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) { + if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && + (busy_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_util = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_util)) { @@ -1130,7 +1136,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && (mreads || mwrites))) { + if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && + (mreads || mwrites || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_mops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_mops)) { @@ -1163,7 +1170,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) { + if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && + (readms || writems || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_iotime = CONFIG_BOOLEAN_YES; 
if(unlikely(!d->st_iotime)) { @@ -1199,8 +1207,12 @@ int do_proc_diskstats(int update_every, usec_t dt) { // only if this is not the first time we run if(likely(dt)) { - if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && + (readms || writems || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_await)) { d->st_await = rrdset_create_localhost( @@ -1230,8 +1242,10 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_await); } - if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && + (readsectors || writesectors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_avgsz)) { d->st_avgsz = rrdset_create_localhost( @@ -1261,8 +1275,12 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_avgsz); } - if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && + (busy_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_svctm)) { d->st_svctm = rrdset_create_localhost( @@ -1505,7 +1523,11 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_bcache_cache_read_races); } - if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_hits != 0 || stats_total_cache_misses != 0 || stats_total_cache_miss_collisions != 0))) { + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && + (stats_total_cache_hits || + stats_total_cache_misses || + stats_total_cache_miss_collisions || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!d->st_bcache)) { d->st_bcache = rrdset_create_localhost( @@ -1539,7 +1561,10 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_bcache); } - if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_bypass_hits != 0 || stats_total_cache_bypass_misses != 0))) { + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && + (stats_total_cache_bypass_hits || + stats_total_cache_bypass_misses || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!d->st_bcache_bypass)) { d->st_bcache_bypass = rrdset_create_localhost( @@ -1575,7 +1600,9 @@ int do_proc_diskstats(int update_every, usec_t dt) { // ------------------------------------------------------------------------ // update the system total I/O - if(global_do_io == 
CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && (system_read_kb || system_write_kb))) { + if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && + (system_read_kb || system_write_kb || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { static RRDSET *st_io = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c index ae399c440..92135393d 100644 --- a/collectors/proc.plugin/proc_meminfo.c +++ b/collectors/proc.plugin/proc_meminfo.c @@ -219,7 +219,9 @@ int do_proc_meminfo(int update_every, usec_t dt) { unsigned long long SwapUsed = SwapTotal - SwapFree; - if(do_swap == CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) { + if(do_swap == CONFIG_BOOLEAN_YES || (do_swap == CONFIG_BOOLEAN_AUTO && + (SwapTotal || SwapUsed || SwapFree || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_swap = CONFIG_BOOLEAN_YES; static RRDSET *st_system_swap = NULL; @@ -256,7 +258,10 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) { + if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && + (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && + (HardwareCorrupted > 0 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { do_hwcorrupt = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_hwcorrupt = NULL; @@ -438,7 +443,9 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && Hugepagesize != 0 && HugePages_Total != 0)) { + if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && + ((Hugepagesize && HugePages_Total) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_hugepages = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_hugepages = NULL; @@ -479,7 +486,10 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && (AnonHugePages != 0 || ShmemHugePages != 0))) { + if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && + (AnonHugePages || + ShmemHugePages || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_transparent_hugepages = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_transparent_hugepages = NULL; diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c index 1e426e977..8d9751d1c 100644 --- a/collectors/proc.plugin/proc_net_dev.c +++ b/collectors/proc.plugin/proc_net_dev.c @@ -601,7 +601,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_bandwidth == CONFIG_BOOLEAN_AUTO && (d->rbytes || d->tbytes)))) + if(unlikely(d->do_bandwidth == CONFIG_BOOLEAN_AUTO && + (d->rbytes || d->tbytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_bandwidth = CONFIG_BOOLEAN_YES; if(d->do_bandwidth == CONFIG_BOOLEAN_YES) { @@ -671,7 +672,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(unlikely((d->do_packets == CONFIG_BOOLEAN_AUTO && (d->rpackets || d->tpackets || d->rmulticast)))) + if(unlikely(d->do_packets == CONFIG_BOOLEAN_AUTO && + (d->rpackets || d->tpackets || d->rmulticast || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_packets = CONFIG_BOOLEAN_YES; if(d->do_packets == CONFIG_BOOLEAN_YES) { @@ -716,7 +718,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_errors == CONFIG_BOOLEAN_AUTO && (d->rerrors || d->terrors)))) + if(unlikely(d->do_errors == CONFIG_BOOLEAN_AUTO && + (d->rerrors || d->terrors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_errors = CONFIG_BOOLEAN_YES; if(d->do_errors == CONFIG_BOOLEAN_YES) { @@ -759,7 +762,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_drops == CONFIG_BOOLEAN_AUTO && (d->rdrops || d->tdrops)))) + if(unlikely(d->do_drops == CONFIG_BOOLEAN_AUTO && + (d->rdrops || d->tdrops || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_drops = CONFIG_BOOLEAN_YES; if(d->do_drops == CONFIG_BOOLEAN_YES) { @@ -802,7 +806,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_fifo == CONFIG_BOOLEAN_AUTO && (d->rfifo || d->tfifo)))) + if(unlikely(d->do_fifo == CONFIG_BOOLEAN_AUTO && + (d->rfifo || d->tfifo || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_fifo = CONFIG_BOOLEAN_YES; if(d->do_fifo == CONFIG_BOOLEAN_YES) { @@ -845,7 +850,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_compressed == CONFIG_BOOLEAN_AUTO && (d->rcompressed || d->tcompressed)))) + if(unlikely(d->do_compressed == CONFIG_BOOLEAN_AUTO && + (d->rcompressed || d->tcompressed || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_compressed = CONFIG_BOOLEAN_YES; if(d->do_compressed == CONFIG_BOOLEAN_YES) { @@ -888,7 +894,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_events == CONFIG_BOOLEAN_AUTO && (d->rframe || d->tcollisions || d->tcarrier)))) + if(unlikely(d->do_events == CONFIG_BOOLEAN_AUTO && + (d->rframe || d->tcollisions || d->tcarrier || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_events = CONFIG_BOOLEAN_YES; if(d->do_events == CONFIG_BOOLEAN_YES) { @@ -924,7 +931,9 @@ int do_proc_net_dev(int update_every, usec_t dt) { } } - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (system_rbytes || system_tbytes))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (system_rbytes || system_tbytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st_system_net = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c index 2dc3c59c0..ab8206be3 100644 --- a/collectors/proc.plugin/proc_net_netstat.c +++ b/collectors/proc.plugin/proc_net_netstat.c @@ -262,7 +262,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets 
|| ipext_OutOctets))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (ipext_InOctets || + ipext_OutOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st_system_ip = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; @@ -297,7 +300,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) { + if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && + (ipext_InNoRoutes || + ipext_InTruncatedPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_inerrors = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_inerrors = NULL; static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL; @@ -336,7 +342,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) { + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && + (ipext_InMcastOctets || + ipext_OutMcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_mcast = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; @@ -373,7 +382,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) { + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && + (ipext_InBcastOctets || + ipext_OutBcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_bcast = NULL; @@ -411,7 +423,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) { + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && + (ipext_InMcastPkts || + ipext_OutMcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_mcastpkts = NULL; @@ -448,7 +463,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) { + if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && + (ipext_InBcastPkts || + ipext_OutBcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_bcastpkts = NULL; @@ -486,7 +504,12 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) { + if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (ipext_InCEPkts || + ipext_InECT0Pkts || + ipext_InECT1Pkts || + ipext_InNoECTPkts || + 
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; static RRDSET *st_ecnpkts = NULL; @@ -538,7 +561,9 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) { + if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPMemoryPressures || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_memory = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpmemorypressures = NULL; @@ -572,7 +597,14 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) { + if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPAbortOnData || + tcpext_TCPAbortOnClose || + tcpext_TCPAbortOnMemory || + tcpext_TCPAbortOnTimeout || + tcpext_TCPAbortOnLinger || + tcpext_TCPAbortFailed || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpconnaborts = NULL; @@ -616,7 +648,12 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) { + if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPRenoReorder || + tcpext_TCPFACKReorder || + tcpext_TCPSACKReorder || + tcpext_TCPTSReorder || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_reorder = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpreorders = NULL; @@ -656,7 +693,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) { + if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPOFOQueue || + tcpext_TCPOFODrop || + tcpext_TCPOFOMerge || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_tcpofo = NULL; @@ -697,7 +738,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) { + if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && + (tcpext_SyncookiesSent || + tcpext_SyncookiesRecv || + tcpext_SyncookiesFailed || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syscookies = CONFIG_BOOLEAN_YES; static RRDSET *st_syncookies = NULL; @@ -736,7 +781,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && (tcpext_TCPReqQFullDrop || tcpext_TCPReqQFullDoCookies))) { + if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPReqQFullDrop || + tcpext_TCPReqQFullDoCookies || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syn_queue = CONFIG_BOOLEAN_YES; static RRDSET *st_syn_queue = NULL; @@ -775,7 +823,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && (tcpext_ListenOverflows || tcpext_ListenDrops))) { + if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && + (tcpext_ListenOverflows || + tcpext_ListenDrops || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_accept_queue = CONFIG_BOOLEAN_YES; static RRDSET *st_accept_queue = NULL; diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c index bd1062e98..343cc5afb 100644 --- a/collectors/proc.plugin/proc_net_sctp_snmp.c +++ b/collectors/proc.plugin/proc_net_sctp_snmp.c @@ -124,7 +124,8 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && SctpCurrEstab)) { + if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && + (SctpCurrEstab || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_associations = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_established = NULL; @@ -155,7 +156,12 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && (SctpActiveEstabs || SctpPassiveEstabs || SctpAborteds || SctpShutdowns))) { + if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && + (SctpActiveEstabs || + SctpPassiveEstabs || + SctpAborteds || + SctpShutdowns || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_transitions = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_active = NULL, @@ -195,7 +201,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && (SctpInSCTPPacks || SctpOutSCTPPacks))) { + if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && + (SctpInSCTPPacks || + SctpOutSCTPPacks || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -230,7 +239,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO && (SctpOutOfBlues || SctpChecksumErrors))) { + if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO && + (SctpOutOfBlues || + SctpChecksumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_packet_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM 
*rd_invalid = NULL, @@ -265,7 +277,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && (SctpFragUsrMsgs || SctpReasmUsrMsgs))) { + if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && + (SctpFragUsrMsgs || + SctpReasmUsrMsgs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_fragmentation = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -300,8 +315,14 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO - && (SctpInCtrlChunks || SctpInOrderChunks || SctpInUnorderChunks || SctpOutCtrlChunks || SctpOutOrderChunks || SctpOutUnorderChunks))) { + if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO && + (SctpInCtrlChunks || + SctpInOrderChunks || + SctpInUnorderChunks || + SctpOutCtrlChunks || + SctpOutOrderChunks || + SctpOutUnorderChunks || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_chunk_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM diff --git a/collectors/proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c index ffd368f6e..b03a6ac74 100644 --- a/collectors/proc.plugin/proc_net_snmp.c +++ b/collectors/proc.plugin/proc_net_snmp.c @@ -258,7 +258,12 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_OutRequests || snmp_root.ip_InReceives || snmp_root.ip_ForwDatagrams || snmp_root.ip_InDelivers))) { + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_OutRequests || + snmp_root.ip_InReceives || + snmp_root.ip_ForwDatagrams || + snmp_root.ip_InDelivers || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -299,7 +304,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_FragOKs || snmp_root.ip_FragFails || snmp_root.ip_FragCreates))) { + if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_FragOKs || + snmp_root.ip_FragFails || + snmp_root.ip_FragCreates || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -338,7 +347,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_ReasmOKs || snmp_root.ip_ReasmFails || snmp_root.ip_ReasmReqds))) { + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_ReasmOKs || + snmp_root.ip_ReasmFails || + snmp_root.ip_ReasmReqds || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -377,7 +390,14 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_InDiscards || snmp_root.ip_OutDiscards || snmp_root.ip_InHdrErrors || snmp_root.ip_InAddrErrors || snmp_root.ip_InUnknownProtos || snmp_root.ip_OutNoRoutes))) { + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_InDiscards || + snmp_root.ip_OutDiscards || + snmp_root.ip_InHdrErrors || + snmp_root.ip_InAddrErrors || + snmp_root.ip_InUnknownProtos || + snmp_root.ip_OutNoRoutes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -447,7 +467,13 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.icmp_InMsgs || snmp_root.icmp_OutMsgs || snmp_root.icmp_InErrors || snmp_root.icmp_OutErrors || snmp_root.icmp_InCsumErrors))) { + if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.icmp_InMsgs || + snmp_root.icmp_OutMsgs || + snmp_root.icmp_InErrors || + snmp_root.icmp_OutErrors || + snmp_root.icmp_InCsumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_packets = CONFIG_BOOLEAN_YES; { @@ -540,28 +566,28 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && ( - snmp_root.icmpmsg_InEchoReps - || snmp_root.icmpmsg_OutEchoReps - || snmp_root.icmpmsg_InDestUnreachs - || snmp_root.icmpmsg_OutDestUnreachs - || snmp_root.icmpmsg_InRedirects - || snmp_root.icmpmsg_OutRedirects - || snmp_root.icmpmsg_InEchos - || snmp_root.icmpmsg_OutEchos - || snmp_root.icmpmsg_InRouterAdvert - || snmp_root.icmpmsg_OutRouterAdvert - || snmp_root.icmpmsg_InRouterSelect - || snmp_root.icmpmsg_OutRouterSelect - || snmp_root.icmpmsg_InTimeExcds - || snmp_root.icmpmsg_OutTimeExcds - || snmp_root.icmpmsg_InParmProbs - || snmp_root.icmpmsg_OutParmProbs - || snmp_root.icmpmsg_InTimestamps - || snmp_root.icmpmsg_OutTimestamps - || snmp_root.icmpmsg_InTimestampReps - || snmp_root.icmpmsg_OutTimestampReps - ))) { + if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && + (snmp_root.icmpmsg_InEchoReps || + snmp_root.icmpmsg_OutEchoReps || + snmp_root.icmpmsg_InDestUnreachs || + snmp_root.icmpmsg_OutDestUnreachs || + snmp_root.icmpmsg_InRedirects || + snmp_root.icmpmsg_OutRedirects || + snmp_root.icmpmsg_InEchos || + snmp_root.icmpmsg_OutEchos || + snmp_root.icmpmsg_InRouterAdvert || + snmp_root.icmpmsg_OutRouterAdvert || + snmp_root.icmpmsg_InRouterSelect || + snmp_root.icmpmsg_OutRouterSelect || + snmp_root.icmpmsg_InTimeExcds || + snmp_root.icmpmsg_OutTimeExcds || + snmp_root.icmpmsg_InParmProbs || + snmp_root.icmpmsg_OutParmProbs || + snmp_root.icmpmsg_InTimestamps || + snmp_root.icmpmsg_OutTimestamps || + snmp_root.icmpmsg_InTimestampReps || + snmp_root.icmpmsg_OutTimestampReps || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmpmsg = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -677,7 +703,9 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && snmp_root.tcp_CurrEstab)) { 
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_CurrEstab || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -709,7 +737,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InSegs || snmp_root.tcp_OutSegs))) { + if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_InSegs || + snmp_root.tcp_OutSegs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -744,7 +775,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InErrs || snmp_root.tcp_InCsumErrors || snmp_root.tcp_RetransSegs))) { + if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_InErrs || + snmp_root.tcp_InCsumErrors || + snmp_root.tcp_RetransSegs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -783,7 +818,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_ActiveOpens || snmp_root.tcp_PassiveOpens))) { + if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_ActiveOpens || + snmp_root.tcp_PassiveOpens || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_opens = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -819,7 +857,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_EstabResets || snmp_root.tcp_OutRsts || snmp_root.tcp_AttemptFails))) { + if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_EstabResets || + snmp_root.tcp_OutRsts || + snmp_root.tcp_AttemptFails || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_handshake = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -882,7 +924,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- // see http://net-snmp.sourceforge.net/docs/mibs/udp.html - if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.udp_InDatagrams || snmp_root.udp_OutDatagrams))) { + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.udp_InDatagrams || + snmp_root.udp_OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -917,14 +962,14 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && ( - snmp_root.udp_InErrors - || snmp_root.udp_NoPorts - || snmp_root.udp_RcvbufErrors - || snmp_root.udp_SndbufErrors - 
|| snmp_root.udp_InCsumErrors - || snmp_root.udp_IgnoredMulti - ))) { + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.udp_InErrors || + snmp_root.udp_NoPorts || + snmp_root.udp_RcvbufErrors || + snmp_root.udp_SndbufErrors || + snmp_root.udp_InCsumErrors || + snmp_root.udp_IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -992,16 +1037,16 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && ( - snmp_root.udplite_InDatagrams - || snmp_root.udplite_OutDatagrams - || snmp_root.udplite_NoPorts - || snmp_root.udplite_InErrors - || snmp_root.udplite_InCsumErrors - || snmp_root.udplite_RcvbufErrors - || snmp_root.udplite_SndbufErrors - || snmp_root.udplite_IgnoredMulti - ))) { + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.udplite_InDatagrams || + snmp_root.udplite_OutDatagrams || + snmp_root.udplite_NoPorts || + snmp_root.udplite_InErrors || + snmp_root.udplite_InCsumErrors || + snmp_root.udplite_RcvbufErrors || + snmp_root.udplite_SndbufErrors || + snmp_root.udplite_IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_packets = CONFIG_BOOLEAN_YES; { diff --git a/collectors/proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c index f0084aa26..445e0dcab 100644 --- a/collectors/proc.plugin/proc_net_snmp6.c +++ b/collectors/proc.plugin/proc_net_snmp6.c @@ -277,7 +277,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (Ip6InOctets || Ip6OutOctets))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (Ip6InOctets || + Ip6OutOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -311,7 +314,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (Ip6InReceives || Ip6OutRequests || Ip6InDelivers || Ip6OutForwDatagrams))) { + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && + (Ip6InReceives || + Ip6OutRequests || + Ip6InDelivers || + Ip6OutForwDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -351,7 +359,11 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (Ip6FragOKs || Ip6FragFails || Ip6FragCreates))) { + if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && + (Ip6FragOKs || + Ip6FragFails || + Ip6FragCreates || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_ok = NULL, @@ -389,13 +401,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // 
-------------------------------------------------------------------- - if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO - && ( - Ip6ReasmOKs - || Ip6ReasmFails - || Ip6ReasmTimeout - || Ip6ReasmReqds - ))) { + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && + (Ip6ReasmOKs || + Ip6ReasmFails || + Ip6ReasmTimeout || + Ip6ReasmReqds || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -436,17 +447,16 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO - && ( - Ip6InDiscards - || Ip6OutDiscards - || Ip6InHdrErrors - || Ip6InAddrErrors - || Ip6InUnknownProtos - || Ip6InTooBigErrors - || Ip6InTruncatedPkts - || Ip6InNoRoutes - ))) { + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && + (Ip6InDiscards || + Ip6OutDiscards || + Ip6InHdrErrors || + Ip6InAddrErrors || + Ip6InUnknownProtos || + Ip6InTooBigErrors || + Ip6InTruncatedPkts || + Ip6InNoRoutes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InDiscards = NULL, @@ -502,7 +512,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (Udp6InDatagrams || Udp6OutDatagrams))) { + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && + (Udp6InDatagrams || + Udp6OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -536,15 +549,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO - && ( - Udp6InErrors - || Udp6NoPorts - || Udp6RcvbufErrors - || Udp6SndbufErrors - || Udp6InCsumErrors - || Udp6IgnoredMulti - ))) { + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && + (Udp6InErrors || + Udp6NoPorts || + Udp6RcvbufErrors || + Udp6SndbufErrors || + Udp6InCsumErrors || + Udp6IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_RcvbufErrors = NULL, @@ -591,7 +603,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (UdpLite6InDatagrams || UdpLite6OutDatagrams))) { + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && + (UdpLite6InDatagrams || + UdpLite6OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -625,15 +640,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO - && ( - UdpLite6InErrors - || UdpLite6NoPorts - || UdpLite6RcvbufErrors - || 
UdpLite6SndbufErrors - || Udp6InCsumErrors - || UdpLite6InCsumErrors - ))) { + if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO && + (UdpLite6InErrors || + UdpLite6NoPorts || + UdpLite6RcvbufErrors || + UdpLite6SndbufErrors || + Udp6InCsumErrors || + UdpLite6InCsumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_RcvbufErrors = NULL, @@ -677,7 +691,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastOctets || Ip6InMcastOctets))) { + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && + (Ip6OutMcastOctets || + Ip6InMcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InMcastOctets = NULL, @@ -712,7 +729,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (Ip6OutBcastOctets || Ip6InBcastOctets))) { + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && + (Ip6OutBcastOctets || + Ip6InBcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InBcastOctets = NULL, @@ -747,7 +767,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastPkts || Ip6InMcastPkts))) { + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && + (Ip6OutMcastPkts || + Ip6InMcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InMcastPkts = NULL, @@ -782,7 +805,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && (Icmp6InMsgs || Icmp6OutMsgs))) { + if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && + (Icmp6InMsgs || + Icmp6OutMsgs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Icmp6InMsgs = NULL, @@ -816,7 +842,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && (Icmp6InRedirects || Icmp6OutRedirects))) { + if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && + (Icmp6InRedirects || + Icmp6OutRedirects || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_redir = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Icmp6InRedirects = NULL, @@ -850,20 +879,19 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InErrors - || Icmp6OutErrors - || Icmp6InCsumErrors - || Icmp6InDestUnreachs - || Icmp6InPktTooBigs - || 
Icmp6InTimeExcds - || Icmp6InParmProblems - || Icmp6OutDestUnreachs - || Icmp6OutPktTooBigs - || Icmp6OutTimeExcds - || Icmp6OutParmProblems - ))) { + if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO && + (Icmp6InErrors || + Icmp6OutErrors || + Icmp6InCsumErrors || + Icmp6InDestUnreachs || + Icmp6InPktTooBigs || + Icmp6InTimeExcds || + Icmp6InParmProblems || + Icmp6OutDestUnreachs || + Icmp6OutPktTooBigs || + Icmp6OutTimeExcds || + Icmp6OutParmProblems || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InErrors = NULL, @@ -924,13 +952,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InEchos - || Icmp6OutEchos - || Icmp6InEchoReplies - || Icmp6OutEchoReplies - ))) { + if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO && + (Icmp6InEchos || + Icmp6OutEchos || + Icmp6InEchoReplies || + Icmp6OutEchoReplies || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_echos = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InEchos = NULL, @@ -970,15 +997,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InGroupMembQueries - || Icmp6OutGroupMembQueries - || Icmp6InGroupMembResponses - || Icmp6OutGroupMembResponses - || Icmp6InGroupMembReductions - || Icmp6OutGroupMembReductions - ))) { + if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO && + (Icmp6InGroupMembQueries || + Icmp6OutGroupMembQueries || + Icmp6InGroupMembResponses || + Icmp6OutGroupMembResponses || + Icmp6InGroupMembReductions || + Icmp6OutGroupMembReductions || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_groupmemb = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InQueries = NULL, @@ -1023,13 +1049,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InRouterSolicits - || Icmp6OutRouterSolicits - || Icmp6InRouterAdvertisements - || Icmp6OutRouterAdvertisements - ))) { + if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO && + (Icmp6InRouterSolicits || + Icmp6OutRouterSolicits || + Icmp6InRouterAdvertisements || + Icmp6OutRouterAdvertisements || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_router = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InSolicits = NULL, @@ -1069,13 +1094,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InNeighborSolicits - || Icmp6OutNeighborSolicits - || Icmp6InNeighborAdvertisements - || Icmp6OutNeighborAdvertisements - ))) { + if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO && + (Icmp6InNeighborSolicits || + Icmp6OutNeighborSolicits || + Icmp6InNeighborAdvertisements || + Icmp6OutNeighborAdvertisements || + netdata_zero_metrics_enabled == 
CONFIG_BOOLEAN_YES))) { do_icmp_neighbor = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InSolicits = NULL, @@ -1115,7 +1139,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && (Icmp6InMLDv2Reports || Icmp6OutMLDv2Reports))) { + if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && + (Icmp6InMLDv2Reports || + Icmp6OutMLDv2Reports || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_mldv2 = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InMLDv2Reports = NULL, @@ -1149,19 +1176,18 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InType1 - || Icmp6InType128 - || Icmp6InType129 - || Icmp6InType136 - || Icmp6OutType1 - || Icmp6OutType128 - || Icmp6OutType129 - || Icmp6OutType133 - || Icmp6OutType135 - || Icmp6OutType143 - ))) { + if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO && + (Icmp6InType1 || + Icmp6InType128 || + Icmp6InType129 || + Icmp6InType136 || + Icmp6OutType1 || + Icmp6OutType128 || + Icmp6OutType129 || + Icmp6OutType133 || + Icmp6OutType135 || + Icmp6OutType143 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InType1 = NULL, @@ -1219,13 +1245,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO - && ( - Ip6InNoECTPkts - || Ip6InECT1Pkts - || Ip6InECT0Pkts - || Ip6InCEPkts - ))) { + if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO && + (Ip6InNoECTPkts || + Ip6InECT1Pkts || + Ip6InECT0Pkts || + Ip6InCEPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ect = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InNoECTPkts = NULL, diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c index ff9cc5230..994cbad7b 100644 --- a/collectors/proc.plugin/proc_net_sockstat.c +++ b/collectors/proc.plugin/proc_net_sockstat.c @@ -218,7 +218,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.sockets_used)) { + if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.sockets_used || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -250,7 +252,12 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat_root.tcp_inuse || sockstat_root.tcp_orphan || sockstat_root.tcp_tw || sockstat_root.tcp_alloc))) { + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.tcp_inuse || + sockstat_root.tcp_orphan || + sockstat_root.tcp_tw || + sockstat_root.tcp_alloc || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = 
CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -291,7 +298,8 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.tcp_mem)) { + if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.tcp_mem || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -323,7 +331,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_inuse)) { + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udp_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -355,7 +365,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_mem)) { + if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udp_mem || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -387,7 +399,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udplite_inuse)) { + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udplite_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -419,7 +433,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.raw_inuse)) { + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.raw_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_raw_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -451,7 +467,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_inuse)) { + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.frag_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -483,7 +501,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_memory)) { + if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.frag_memory || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; 
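Every hunk above, and the sockstat6, synproxy, vmstat, edac, numa, btrfs and tc changes that follow, applies the same transformation: a chart whose option is `auto` is now enabled either when at least one of its source counters is non-zero, or when the new global `netdata_zero_metrics_enabled` flag is set (declared in daemon/common.h and initialized in daemon/main.c later in this patch). A minimal, self-contained sketch of the shared predicate; the helper name is hypothetical, since netdata open-codes this test at every chart, and the constant values are only what libnetdata conventionally uses:

```c
#include <stdio.h>

#define CONFIG_BOOLEAN_NO   0   /* values as conventionally defined in libnetdata; */
#define CONFIG_BOOLEAN_YES  1   /* only their distinctness matters for this sketch */
#define CONFIG_BOOLEAN_AUTO 2

/* the real flag is a global set from the "enable zero metrics" option */
static int netdata_zero_metrics_enabled = CONFIG_BOOLEAN_NO;

/* hypothetical helper: netdata open-codes this test per chart so that each
   call site can OR together exactly the counters that drive that chart */
static int chart_enabled(int do_chart, unsigned long long counters_sum) {
    return do_chart == CONFIG_BOOLEAN_YES ||
           (do_chart == CONFIG_BOOLEAN_AUTO &&
            (counters_sum != 0 ||                                   /* a metric moved */
             netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES));  /* or zero charts forced on */
}

int main(void) {
    printf("auto, all zero: %d\n", chart_enabled(CONFIG_BOOLEAN_AUTO, 0)); /* 0: stays hidden */
    netdata_zero_metrics_enabled = CONFIG_BOOLEAN_YES;
    printf("auto, forced:   %d\n", chart_enabled(CONFIG_BOOLEAN_AUTO, 0)); /* 1: shown anyway */
    return 0;
}
```

Note that once the predicate passes, each call site latches the option to `CONFIG_BOOLEAN_YES`, so a chart that has appeared does not disappear again within the same run.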
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c index 687b9bdeb..ce8c9e093 100644 --- a/collectors/proc.plugin/proc_net_sockstat6.c +++ b/collectors/proc.plugin/proc_net_sockstat6.c @@ -111,7 +111,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat6_root.tcp6_inuse))) { + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.tcp6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -143,7 +145,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udp6_inuse)) { + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.udp6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -175,7 +179,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udplite6_inuse)) { + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.udplite6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -207,7 +213,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.raw6_inuse)) { + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.raw6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_raw_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -239,7 +247,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.frag6_inuse)) { + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.frag6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c index 312ded5ba..f5030f99c 100644 --- a/collectors/proc.plugin/proc_net_stat_synproxy.c +++ b/collectors/proc.plugin/proc_net_stat_synproxy.c @@ -59,7 +59,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_entries == CONFIG_BOOLEAN_AUTO && events) || do_entries == CONFIG_BOOLEAN_YES) { + if(do_entries == CONFIG_BOOLEAN_YES || (do_entries == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_entries = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -89,7 +90,8 @@ int 
do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_syns == CONFIG_BOOLEAN_AUTO && events) || do_syns == CONFIG_BOOLEAN_YES) { + if(do_syns == CONFIG_BOOLEAN_YES || (do_syns == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_syns = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -119,7 +121,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_reopened == CONFIG_BOOLEAN_AUTO && events) || do_reopened == CONFIG_BOOLEAN_YES) { + if(do_reopened == CONFIG_BOOLEAN_YES || (do_reopened == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_reopened = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -149,7 +152,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_cookies == CONFIG_BOOLEAN_AUTO && events) || do_cookies == CONFIG_BOOLEAN_YES) { + if(do_cookies == CONFIG_BOOLEAN_YES || (do_cookies == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_cookies = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c index c6557289d..32ff36b76 100644 --- a/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -124,6 +124,8 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename); show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO); + if(show_zero_charts == CONFIG_BOOLEAN_AUTO && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES) + show_zero_charts = CONFIG_BOOLEAN_YES; if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES)) do_zfs_stats = 1; } diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c index a9712b242..7def02ddf 100644 --- a/collectors/proc.plugin/proc_vmstat.c +++ b/collectors/proc.plugin/proc_vmstat.c @@ -43,7 +43,9 @@ int do_proc_vmstat(int update_every, usec_t dt) { arl_expect(arl_base, "pswpin", &pswpin); arl_expect(arl_base, "pswpout", &pswpout); - if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && get_numa_node_count() >= 2)) { + if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && + (get_numa_node_count() >= 2 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { arl_expect(arl_base, "numa_foreign", &numa_foreign); arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local); arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults); @@ -91,7 +93,9 @@ int do_proc_vmstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(pswpin || pswpout || do_swapio == CONFIG_BOOLEAN_YES) { + if(do_swapio == CONFIG_BOOLEAN_YES || (do_swapio == CONFIG_BOOLEAN_AUTO && + (pswpin || pswpout || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_swapio = CONFIG_BOOLEAN_YES; static RRDSET *st_swapio = NULL; diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c index 03cbfff83..b11148375 100644 --- a/collectors/proc.plugin/sys_devices_system_edac_mc.c +++ 
b/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -128,7 +128,8 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && ce_sum > 0)) { + if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && + (ce_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ce = CONFIG_BOOLEAN_YES; static RRDSET *ce_st = NULL; @@ -166,7 +167,8 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && ue_sum > 0)) { + if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && + (ue_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ue = CONFIG_BOOLEAN_YES; static RRDSET *ue_st = NULL; diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c index 6e6d0acca..ff408ed88 100644 --- a/collectors/proc.plugin/sys_devices_system_node.c +++ b/collectors/proc.plugin/sys_devices_system_node.c @@ -83,7 +83,8 @@ int do_proc_sys_devices_system_node(int update_every, usec_t dt) { hash_numa_miss = simple_hash("numa_miss"); } - if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { + if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && + (numa_node_count >= 2 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { for(m = numa_root; m; m = m->next) { if(m->numastat_filename) { diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c index cb25ad440..4e58a1a4c 100644 --- a/collectors/proc.plugin/sys_fs_btrfs.c +++ b/collectors/proc.plugin/sys_fs_btrfs.c @@ -542,7 +542,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/disks - if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && node->all_disks_total && node->allocation_data_disk_total)) { + if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && + ((node->all_disks_total && node->allocation_data_disk_total) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_disks = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_disks)) { @@ -598,7 +600,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/data - if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && node->allocation_data_total_bytes)) { + if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && + (node->allocation_data_total_bytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_data = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_data)) { @@ -639,7 +643,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/metadata - if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && node->allocation_metadata_total_bytes)) { + if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && + (node->allocation_metadata_total_bytes || + 
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_metadata = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_metadata)) { @@ -682,7 +688,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/system - if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && node->allocation_system_total_bytes)) { + if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && + (node->allocation_system_total_bytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_system = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_system)) { diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c index 0b64987c9..0a93f54ee 100644 --- a/collectors/proc.plugin/sys_kernel_mm_ksm.c +++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c @@ -89,7 +89,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { offered = pages_sharing + pages_shared + pages_unshared + pages_volatile; saved = pages_sharing; - if(unlikely(!offered /*|| !pages_to_scan*/)) return 0; + if(unlikely(!offered /*|| !pages_to_scan*/ && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_NO)) return 0; // -------------------------------------------------------------------- @@ -192,7 +192,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { else rrdset_next(st_mem_ksm_ratios); - rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered); + rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, offered ? (saved * 1000000) / offered : 0); rrdset_done(st_mem_ksm_ratios); } diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf index 53858ae2e..2dded40ae 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.conf +++ b/collectors/python.d.plugin/mongodb/mongodb.conf @@ -84,9 +84,9 @@ local: port : 27017 # authsample: -# name : 'secure' -# host : 'mongodb.example.com' -# port : 27017 +# name : 'secure' +# host : 'mongodb.example.com' +# port : 27017 # authdb : 'admin' -# user : 'monitor' -# password : 'supersecret' +# user : 'monitor' +# pass : 'supersecret' diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py index 27519b76a..3b94fcdf2 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py @@ -4,6 +4,7 @@ # Author: Ilya Mashchenko (ilyam8) # SPDX-License-Identifier: GPL-3.0-or-later +import errno import socket try: @@ -181,7 +182,8 @@ class SocketService(SimpleService): self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all self._sock.close() except Exception as error: - self.error(error) + if not (hasattr(error, 'errno') and error.errno == errno.ENOTCONN): + self.error(error) self._sock = None def _send(self, request=None): diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py index ee2fb68b3..80cc1cf18 100644 --- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py @@ -131,6 +131,15 @@ class Server: return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d) +# https://pypi.org/project/rethinkdb/2.4.0/ +# rdb.RethinkDB() can 
be used as rdb drop in replacement. +# https://github.com/rethinkdb/rethinkdb-python#quickstart +def get_rethinkdb(): + if hasattr(rdb, 'RethinkDB'): + return rdb.RethinkDB() + return rdb + + class Service(SimpleService): def __init__(self, configuration=None, name=None): SimpleService.__init__(self, configuration=configuration, name=name) @@ -141,6 +150,7 @@ class Service(SimpleService): self.user = self.configuration.get('user', 'admin') self.password = self.configuration.get('password') self.timeout = self.configuration.get('timeout', 2) + self.rdb = None self.conn = None self.alive = True @@ -149,6 +159,9 @@ class Service(SimpleService): self.error('"rethinkdb" module is needed to use rethinkdbs.py') return False + self.debug("rethinkdb driver version {0}".format(rdb.__version__)) + self.rdb = get_rethinkdb() + if not self.connect(): return None @@ -196,14 +209,14 @@ class Service(SimpleService): def get_stats(self): try: - return list(rdb.db('rethinkdb').table('stats').run(self.conn).items) + return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items) except rdb.errors.ReqlError: self.alive = False return None def connect(self): try: - self.conn = rdb.connect( + self.conn = self.rdb.connect( host=self.host, port=self.port, user=self.user, diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py index 02e88e6a4..6b54ea601 100644 --- a/collectors/python.d.plugin/sensors/sensors.chart.py +++ b/collectors/python.d.plugin/sensors/sensors.chart.py @@ -92,7 +92,7 @@ class Service(SimpleService): SimpleService.__init__(self, configuration=configuration, name=name) self.order = list() self.definitions = dict() - self.chips = list() + self.chips = configuration.get('chips') def get_data(self): data = dict() diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py index 12f756c58..f121ab2e0 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -558,6 +558,7 @@ class DiskLogFile: class BaseDisk: def __init__(self, name, log_file): + self.raw_name = name self.name = re.sub(r'_+', '_', name) self.log_file = log_file self.attrs = list() @@ -566,8 +567,8 @@ class BaseDisk: def __eq__(self, other): if isinstance(other, BaseDisk): - return self.name == other.name - return self.name == other + return self.raw_name == other.raw_name + return self.raw_name == other def __ne__(self, other): return not self == other @@ -657,7 +658,7 @@ class Service(SimpleService): not disk.log_file.is_active(current_time, self.age), ] ): - self.disks.remove(disk.name) + self.disks.remove(disk.raw_name) self.remove_disk_from_charts(disk) def scan(self): @@ -681,9 +682,11 @@ class Service(SimpleService): path = os.path.join(self.log_path, full_name) if name in self.disks: + self.debug('skipping {0}: already in disks'.format(full_name)) return None if [p for p in self.exclude if p in name]: + self.debug('skipping {0}: filtered by `exclude` option'.format(full_name)) return None if not os.access(path, os.R_OK): @@ -747,5 +750,4 @@ class Service(SimpleService): if not chart_id or chart_id not in self.charts: continue - # TODO: can't delete dimension - self.charts[chart_id].hide_dimension('{0}_{1}'.format(disk.name, attr.name)) + self.charts[chart_id].del_dimension('{0}_{1}'.format(disk.name, attr.name)) diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py 
b/collectors/python.d.plugin/unbound/unbound.chart.py index dade2b204..6e5a22c58 100644 --- a/collectors/python.d.plugin/unbound/unbound.chart.py +++ b/collectors/python.d.plugin/unbound/unbound.chart.py @@ -253,15 +253,19 @@ class Service(SocketService): else: self.request = b'UBCT1 status\n' raw = self._get_raw_data() - for line in raw.splitlines(): - if line.startswith('threads'): - self.threads = int(line.split()[1]) - self._generate_perthread_charts() - break - if self.threads is None: - self.info('Unable to auto-detect thread counts, disabling per-thread stats.') - self.perthread = False - self.request = tmp + if raw is None: + result = False + self.warning('Received no data from socket.') + else: + for line in raw.splitlines(): + if line.startswith('threads'): + self.threads = int(line.split()[1]) + self._generate_perthread_charts() + break + if self.threads is None: + self.info('Unable to auto-detect thread counts, disabling per-thread stats.') + self.perthread = False + self.request = tmp return result @staticmethod @@ -274,10 +278,13 @@ class Service(SocketService): raw = self._get_raw_data() data = dict() tmp = dict() - for line in raw.splitlines(): - stat = line.split('=') - tmp[stat[0]] = stat[1] - for item in self.statmap: - if item in tmp: - data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1] + if raw is not None: + for line in raw.splitlines(): + stat = line.split('=') + tmp[stat[0]] = stat[1] + for item in self.statmap: + if item in tmp: + data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1] + else: + self.warning('Received no data from socket.') return data diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index 534466a04..78f0e9807 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -1067,7 +1067,7 @@ static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) { } static STATSD_APP_CHART_DIM *add_dimension_to_app_chart( - STATSD_APP *app + STATSD_APP *app __maybe_unused , STATSD_APP_CHART *chart , const char *metric_name , const char *dim_name diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md index e71944e3c..4133b4f8d 100644 --- a/collectors/tc.plugin/README.md +++ b/collectors/tc.plugin/README.md @@ -191,7 +191,7 @@ Add the following configuration option in `/etc/netdata.conf`: Finally, create `/etc/netdata/tc-qos-helper.conf` with this content: ```tc_show="class"``` -Please note, that by default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +Please note, that by default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
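Since the README paragraph above points users at the new behaviour, a concrete configuration fragment may help. This mirrors the `enable zero metrics` option that the daemon/config/README.md and daemon/main.c hunks later in this patch add to the `[global]` section (default `no`):

```
[global]
    enable zero metrics = yes
```

With this set, charts configured as `auto` are created even while all of their metrics are zero; explicit per-chart `yes`/`no` settings are unaffected.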
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c index 1a0ced9a5..50383f4ce 100644 --- a/collectors/tc.plugin/plugin_tc.c +++ b/collectors/tc.plugin/plugin_tc.c @@ -382,7 +382,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // bytes - if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && bytes_sum)) { + if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && + (bytes_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_bytes = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_bytes)) @@ -425,7 +427,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // packets - if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && packets_sum)) { + if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && + (packets_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_packets = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_packets)) { @@ -478,7 +482,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // dropped - if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && dropped_sum)) { + if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && + (dropped_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_dropped = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_dropped)) { @@ -531,7 +537,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // tokens - if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && tokens_sum)) { + if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && + (tokens_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_tokens = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_tokens)) { @@ -585,7 +593,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // ctokens - if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && ctokens_sum)) { + if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && + (ctokens_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_ctokens = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_ctokens)) { @@ -874,9 +884,6 @@ void *tc_main(void *ptr) { uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP"); uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME"); uint32_t WORKTIME_HASH = simple_hash("WORKTIME"); -#ifdef DETACH_PLUGINS_FROM_NETDATA - uint32_t MYPID_HASH = simple_hash("MYPID"); -#endif uint32_t first_hash; snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir); @@ -1119,17 +1126,6 @@ void *tc_main(void *ptr) { rrdset_done(sttime); } -#ifdef 
DETACH_PLUGINS_FROM_NETDATA - else if(unlikely(first_hash == MYPID_HASH && (strcmp(words[0], "MYPID") == 0))) { - // debug(D_TC_LOOP, "MYPID line '%s'", words[1]); - char *id = words[1]; - pid_t pid = atol(id); - - if(likely(pid)) tc_child_pid = pid; - - debug(D_TC_LOOP, "TC: Child PID is %d.", tc_child_pid); - } -#endif //else { // debug(D_TC_LOOP, "IGNORED line"); //} diff --git a/configs.signatures b/configs.signatures index b0ded05e0..e29883ac5 100644 --- a/configs.signatures +++ b/configs.signatures @@ -381,7 +381,7 @@ declare -A configs_signatures=( ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf' ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf' ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf' - ['ce285c90747428ee5da4efb547418dda']='health.d/dbengine.conf' + ['93674f3206872ae9c43ecbc54988413b']='health.d/dbengine.conf' ['7fb8184d56a27040e73261ed9c6fc76f']='health_alarm_notify.conf' ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf' ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf' diff --git a/configure.ac b/configure.ac index b922ad5bf..c65e406fd 100644 --- a/configure.ac +++ b/configure.ac @@ -437,6 +437,14 @@ else AC_DEFINE_UNQUOTED([unlikely(x)], [(x)], [gcc branch optimization]) fi +if test "${GCC}" = "yes"; then + AC_DEFINE([__always_unused], [__attribute__((unused))], [gcc unused attribute]) + AC_DEFINE([__maybe_unused], [__attribute__((unused))], [gcc unused attribute]) +else + AC_DEFINE([__always_unused], [], [dummy unused attribute]) + AC_DEFINE([__maybe_unused], [], [dummy unused attribute]) +fi + if test "${enable_pedantic}" = "yes"; then enable_strict="yes" CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long" @@ -816,17 +824,17 @@ if test "${have_libaws_cpp_sdk_core}" = "yes" -a "${have_libcrypto}" = "yes" -a CXXFLAGS="${CXXFLAGS} -std=c++11" AC_TRY_LINK( - [ - #include <aws/core/Aws.h> - #include <aws/core/client/ClientConfiguration.h> - #include <aws/core/auth/AWSCredentials.h> - #include <aws/core/utils/Outcome.h> - #include <aws/kinesis/KinesisClient.h> - #include <aws/kinesis/model/PutRecordRequest.h> - ], - [Aws::Kinesis::Model::PutRecordRequest request;], - [have_libaws_cpp_sdk_kinesis=yes], - [have_libaws_cpp_sdk_kinesis=no] + [ + #include <aws/core/Aws.h> + #include <aws/core/client/ClientConfiguration.h> + #include <aws/core/auth/AWSCredentials.h> + #include <aws/core/utils/Outcome.h> + #include <aws/kinesis/KinesisClient.h> + #include <aws/kinesis/model/PutRecordRequest.h> + ], + [Aws::Kinesis::Model::PutRecordRequest request;], + [have_libaws_cpp_sdk_kinesis=yes], + [have_libaws_cpp_sdk_kinesis=no] ) LIBS="${save_LIBS}" @@ -879,12 +887,40 @@ PKG_CHECK_MODULES( [have_libprotobuf=no] ) -PKG_CHECK_MODULES( - [SNAPPY], - [snappy], - [have_libsnappy=yes], - [have_libsnappy=no] -) +AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy]) + + AC_LANG_SAVE + AC_LANG_CPLUSPLUS + save_LIBS="${LIBS}" + LIBS="-lsnappy" + save_CXXFLAGS="${CXXFLAGS}" + CXXFLAGS="${CXXFLAGS} -std=c++11" + + AC_TRY_LINK( + [ + #include <stdlib.h> + #include <snappy.h> + ], + [ + const char *input = "test"; + size_t compressed_length; + char *buffer = (char *)malloc(5 * sizeof(char)); + snappy::RawCompress(input, 4, buffer, &compressed_length); + free(buffer); + ], + [ + have_libsnappy=yes + SNAPPY_CFLAGS="" + SNAPPY_LIBS="-lsnappy" + ], + [have_libsnappy=no] + ) + + LIBS="${save_LIBS}" + CXXFLAGS="${save_CXXFLAGS}" + AC_LANG_RESTORE + +AC_MSG_RESULT([${have_libsnappy}]) AC_PATH_PROG([PROTOC], [protoc], [no]) AS_IF( @@ -919,7 +955,7 @@ if test "${enable_backend_prometeus_remote_write}" != "no" -a "${have_libprotobu AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability]) OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS}" CXX11FLAG="-std=c++11" - OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS} " + OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} 
${SNAPPY_LIBS}" else enable_backend_prometheus_remote_write="no" fi diff --git a/contrib/debian/changelog b/contrib/debian/changelog new file mode 100644 index 000000000..d9cf8bdbe --- /dev/null +++ b/contrib/debian/changelog @@ -0,0 +1,5 @@ +netdata (PREVIOUS_PACKAGE_VERSION) unstable; urgency=medium + + * Initial Release + +-- Netdata Builder PREVIOUS_PACKAGE_DATE diff --git a/contrib/debian/control b/contrib/debian/control index 0f4f1bc05..ed5bd94b8 100644 --- a/contrib/debian/control +++ b/contrib/debian/control @@ -4,18 +4,51 @@ Build-Depends: debhelper (>= 9), dh-systemd (>= 1.5), dpkg-dev (>= 1.13.19), zlib1g-dev, - uuid-dev + uuid-dev, + libuv1-dev, + liblz4-dev, + libjudy-dev, + libssl-dev, + libmnl-dev, + libjson-c-dev, + libcups2-dev, + libipmimonitoring-dev, + libnetfilter-acct-dev, + libsnappy-dev, + libprotobuf-dev, + libprotoc-dev, + autogen, + autoconf, + automake, + pkg-config, + curl, + gcc, + g++ Section: net Priority: optional -Maintainer: Costa Tsaousis +Maintainer: Netdata Builder Standards-Version: 3.9.6 -Homepage: https://github.com/netdata/netdata/wiki +Homepage: https://netdata.cloud Package: netdata Architecture: any Depends: adduser, libcap2-bin (>= 1:2.0), lsb-base (>= 3.1-23.2), + zlib1g, + libuuid1, + libuv1, + liblz4-1, + libjudydebian1, + openssl, + libmnl0, + libjson-c3, + cups, + freeipmi, + libnetfilter-acct1, + libprotobuf-c1, + libsnappy1v5, + libprotoc10, ${misc:Depends}, ${shlibs:Depends} Description: real-time charts for system monitoring diff --git a/contrib/debian/control.jessie b/contrib/debian/control.jessie new file mode 100644 index 000000000..ced85d20e --- /dev/null +++ b/contrib/debian/control.jessie @@ -0,0 +1,56 @@ +Source: netdata +Build-Depends: debhelper (>= 9), + dh-autoreconf, + dh-systemd (>= 1.5), + dpkg-dev (>= 1.13.19), + zlib1g-dev, + uuid-dev, + liblz4-dev, + libjudy-dev, + libssl-dev, + libmnl-dev, + libjson-c-dev, + libcups2-dev, + libipmimonitoring-dev, + libnetfilter-acct-dev, + libsnappy-dev, + libprotobuf-dev, + libprotoc-dev, + autogen, + autoconf, + automake, + pkg-config, + curl, + gcc, + g++ +Section: net +Priority: optional +Maintainer: Costa Tsaousis +Standards-Version: 3.9.6 +Homepage: https://github.com/netdata/netdata/wiki + +Package: netdata +Architecture: any +Depends: adduser, + libcap2-bin (>= 1:2.0), + lsb-base (>= 3.1-23.2), + zlib1g, + libuuid1, + liblz4-1, + libjudydebian1, + openssl, + libmnl0, + libjson-c3, + cups, + freeipmi, + libnetfilter-acct1, + libprotobuf-c1, + libsnappy1v5, + libprotoc10, + ${misc:Depends}, + ${shlibs:Depends} +Description: real-time charts for system monitoring + Netdata is a daemon that collects data in realtime (per second) + and presents a web site to view and analyze them. The presentation + is also real-time and full of interactive charts that precisely + render all collected values. 
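For context on the `__always_unused`/`__maybe_unused` macros added in the configure.ac hunk above: under gcc they expand to `__attribute__((unused))` and otherwise to nothing, and the statsd.c hunk earlier in this patch (`STATSD_APP *app __maybe_unused`) is their first user here. A small compiler-agnostic sketch of the same idea, with a hypothetical `add_dimension` function:

```c
#include <stdio.h>

/* as in the configure.ac hunk: gcc gets the attribute, others get nothing */
#ifdef __GNUC__
#define __maybe_unused __attribute__((unused))
#else
#define __maybe_unused
#endif

/* mirrors the statsd.c change: the parameter is kept for API compatibility
   but no longer referenced, and the attribute silences -Wunused-parameter */
static int add_dimension(void *app __maybe_unused, const char *dim_name) {
    printf("adding dimension '%s'\n", dim_name);
    return 0;
}

int main(void) {
    return add_dimension((void *)0, "events");
}
```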
diff --git a/contrib/debian/install_go.sh b/contrib/debian/install_go.sh new file mode 100755 index 000000000..17a3b409e --- /dev/null +++ b/contrib/debian/install_go.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +LIB_DIR="$1" +LIBEXEC_DIR="$2" + +# ############################################################ +# Package Go within netdata (TBD: Package it separately) +safe_sha256sum() { + # Within the context of the installer, we only use the -c option that is common between the two commands + # We will have to reconsider if we start using non-common options + if command -v sha256sum >/dev/null 2>&1; then + sha256sum $@ + elif command -v shasum >/dev/null 2>&1; then + shasum -a 256 $@ + else + fatal "I could not find a suitable checksum binary to use" + fi +} + +download_go() { + url="${1}" + dest="${2}" + + if command -v curl >/dev/null 2>&1; then + curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}" + elif command -v wget >/dev/null 2>&1; then + wget -T 15 -O - "${url}" > "${dest}" + else + echo >&2 + echo >&2 "Downloading go.d plugin from '${url}' failed because of missing mandatory packages." + echo >&2 "Either add packages or disable it by issuing '--disable-go' in the installer" + echo >&2 + exit 1 + fi +} + +install_go() { + # When updating this value, ensure correct checksums in packaging/go.d.checksums + GO_PACKAGE_VERSION="v0.7.0" + ARCH_MAP=( + 'i386::386' + 'i686::386' + 'x86_64::amd64' + 'aarch64::arm64' + 'armv64::arm64' + 'armv6l::arm' + 'armv7l::arm' + 'armv5tel::arm' + ) + + if [ -z "${NETDATA_DISABLE_GO+x}" ]; then + echo >&2 "Install go.d.plugin" + ARCH=$(uname -m) + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + + for index in "${ARCH_MAP[@]}" ; do + KEY="${index%%::*}" + VALUE="${index##*::}" + if [ "$KEY" = "$ARCH" ]; then + ARCH="${VALUE}" + break + fi + done + tmp=$(mktemp -d /tmp/netdata-go-XXXXXX) + GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}" + download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}" + download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz" + + if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then + echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer" + echo >&2 + return 1 + fi + + grep "${GO_PACKAGE_BASENAME}\$" "packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null + grep "config.tar.gz" "packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null + + # Checksum validation + if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then + + echo >&2 "go.d plugin checksum validation failure." + echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer" + echo >&2 + + echo "go.d.plugin package files checksum validation failed." 
+ exit 1 + fi + + # Install files + tar -xf "${tmp}/config.tar.gz" -C "${LIB_DIR}/conf.d/" + mv "${tmp}/$GO_PACKAGE_BASENAME" "${LIBEXEC_DIR}/plugins.d/go.d.plugin" + fi + return 0 +} + +install_go diff --git a/contrib/debian/netdata.postinst.in b/contrib/debian/netdata.postinst.in index 29615f541..44b53ccfb 100644 --- a/contrib/debian/netdata.postinst.in +++ b/contrib/debian/netdata.postinst.in @@ -28,9 +28,10 @@ case "$1" in fi dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry - chown -R root:netdata /usr/share/netdata/* - chown -R root:netdata /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d - setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d/apps.plugin + chown -R root:netdata /usr/share/netdata + chown -R root:netdata /usr/libexec/netdata/plugins.d + chown -R root:netdata /var/lib/netdata/www + setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/libexec/netdata/plugins.d/apps.plugin #PERMS# ;; diff --git a/contrib/debian/rules b/contrib/debian/rules index c19323960..88a8ab36a 100755 --- a/contrib/debian/rules +++ b/contrib/debian/rules @@ -17,7 +17,9 @@ TOP = $(CURDIR)/debian/netdata #dh $@ --with autoreconf override_dh_auto_configure: - dh_auto_configure -- --with-math --with-webdir=/var/lib/netdata/www + autoreconf -ivf + dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var \ + --libexecdir=/usr/libexec --with-user=netdata --with-math --with-webdir=/var/lib/netdata/www debian/%.postinst: debian/%.postinst.in sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' $< > $@ @@ -34,7 +36,7 @@ override_dh_install: debian/netdata.postinst mkdir -p "$(TOP)/usr/share/netdata" for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type d -printf '%f '); do \ echo Relocating $$D; \ - mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/$$D"; \ + mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/www/$$D"; \ ln -s "/usr/share/netdata/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \ done @@ -51,6 +53,10 @@ override_dh_install: debian/netdata.postinst done sed -i "/^#PERMS#/d" $(CURDIR)/debian/netdata.postinst + # Install go + # + debian/install_go.sh $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/ $(TOP)/usr/libexec/netdata + override_dh_installdocs: dh_installdocs @@ -58,10 +64,11 @@ override_dh_installdocs: -name README.md \ -not -path './.travis/*' \ -not -path './debian/*' \ + -not -path './contrib/*' \ -exec cp \ - --parents \ - --target $(TOP)/usr/share/doc/netdata/ \ - {} \; + --parents \ + --target $(TOP)/usr/share/doc/netdata/ \ + {} \; override_dh_fixperms: dh_fixperms @@ -69,7 +76,10 @@ override_dh_fixperms: # apps.plugin should only be runnable by the netdata user. It will be # given extra capabilities in the postinst script. 
# - chmod 0754 $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/apps.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/apps.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/freeipmi.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/perf.plugin + chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/go.d.plugin override_dh_installlogrotate: cp system/netdata.logrotate debian/netdata.logrotate diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in index 7d73f6d61..f16c85a45 100755 --- a/daemon/anonymous-statistics.sh.in +++ b/daemon/anonymous-statistics.sh.in @@ -25,24 +25,6 @@ fi # Shorten version for easier reporting NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v') -echo "&av=${NETDATA_VERSION}\ -&ec=${ACTION}\ -&ea=${ACTION_RESULT}\ -&el=${ACTION_DATA}\ -&cd1=${NETDATA_SYSTEM_OS_NAME}\ -&cd2=${NETDATA_SYSTEM_OS_ID}\ -&cd3=${NETDATA_SYSTEM_OS_ID_LIKE}\ -&cd4=${NETDATA_SYSTEM_OS_VERSION}\ -&cd5=${NETDATA_SYSTEM_OS_VERSION_ID}\ -&cd6=${NETDATA_SYSTEM_OS_DETECTION}\ -&cd7=${NETDATA_SYSTEM_KERNEL_NAME}\ -&cd8=${NETDATA_SYSTEM_KERNEL_VERSION}\ -&cd9=${NETDATA_SYSTEM_ARCHITECTURE}\ -&cd10=${NETDATA_SYSTEM_VIRTUALIZATION}\ -&cd11=${NETDATA_SYSTEM_VIRT_DETECTION}\ -&cd12=${NETDATA_SYSTEM_CONTAINER}\ -&cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}" >> /tmp/as.log - # ------------------------------------------------------------------------------------------------- # send the anonymous statistics to GA # https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters diff --git a/daemon/common.h b/daemon/common.h index a15ddb877..dfbd6cfef 100644 --- a/daemon/common.h +++ b/daemon/common.h @@ -78,6 +78,7 @@ extern char *netdata_configured_varlib_dir; extern char *netdata_configured_home_dir; extern char *netdata_configured_host_prefix; extern char *netdata_configured_timezone; +extern int netdata_zero_metrics_enabled; extern int netdata_anonymous_statistics_enabled; int netdata_ready; diff --git a/daemon/config/README.md b/daemon/config/README.md index 4778cad24..c36a5b6db 100644 --- a/daemon/config/README.md +++ b/daemon/config/README.md @@ -74,6 +74,7 @@ gap when lost iterations above | `1` | cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data. delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. +enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. ### [web] section options @@ -128,7 +129,7 @@ The configuration options for plugins appear in sections following the pattern ` Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information. -Please note, that by default Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. 
 
 #### External plugins
diff --git a/daemon/main.c b/daemon/main.c
index 0ced90810..bd0970fdd 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -2,6 +2,7 @@
 
 #include "common.h"
 
+int netdata_zero_metrics_enabled;
 int netdata_anonymous_statistics_enabled;
 
 struct config netdata_config = {
@@ -1214,6 +1215,8 @@ int main(int argc, char **argv) {
 
         web_server_config_options();
 
+        netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_BOOLEAN_NO);
+
         for (i = 0; static_threads[i].name != NULL ; i++) {
             struct netdata_static_thread *st = &static_threads[i];
 
@@ -1227,7 +1230,7 @@ int main(int argc, char **argv) {
     info("netdata initialization completed. Enjoy real-time performance monitoring!");
     netdata_ready = 1;
-
+    send_statistics("START", "-", "-");
 
     // ------------------------------------------------------------------------
diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c
index 30eaa0ec6..d6e4f3174 100644
--- a/database/engine/journalfile.c
+++ b/database/engine/journalfile.c
@@ -3,13 +3,18 @@
 static void flush_transaction_buffer_cb(uv_fs_t* req)
 {
-    struct generic_io_descriptor *io_descr;
+    struct generic_io_descriptor *io_descr = req->data;
+    struct rrdengine_worker_config* wc = req->loop->data;
+    struct rrdengine_instance *ctx = wc->ctx;
 
     debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__);
     if (req->result < 0) {
-        fatal("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result));
+        ++ctx->stats.io_errors;
+        rrd_stat_atomic_add(&global_io_errors, 1);
+        error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result));
+    } else {
+        debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__);
     }
-    io_descr = req->data;
 
     uv_fs_req_cleanup(req);
     free(io_descr->buf);
@@ -348,6 +353,7 @@ static unsigned replay_transaction(struct rrdengine_instance *ctx, struct rrdeng
     ret = crc32cmp(jf_trailer->checksum, crc);
     debug(D_RRDENGINE, "Transaction %"PRIu64" was read from disk. CRC32 check: %s", *id, ret ? "FAILED" : "SUCCEEDED");
     if (unlikely(ret)) {
+        error("Transaction %"PRIu64" was read from disk. CRC32 check: FAILED", *id);
         return size_bytes;
     }
     switch (jf_header->type) {
diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c
index 0f2dceaa4..221216bb3 100644
--- a/database/engine/rrdengine.c
+++ b/database/engine/rrdengine.c
@@ -37,24 +37,29 @@ void read_extent_cb(uv_fs_t* req)
     unsigned i, j, count;
     void *page, *uncompressed_buf = NULL;
     uint32_t payload_length, payload_offset, page_offset, uncompressed_payload_length;
+    uint8_t have_read_error = 0;
     /* persistent structures */
     struct rrdeng_df_extent_header *header;
     struct rrdeng_df_extent_trailer *trailer;
     uLong crc;
 
     xt_io_descr = req->data;
-    if (req->result < 0) {
-        error("%s: uv_fs_read: %s", __func__, uv_strerror((int)req->result));
-        goto cleanup;
-    }
-
     header = xt_io_descr->buf;
     payload_length = header->payload_length;
     count = header->number_of_pages;
-
     payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count;
-
     trailer = xt_io_descr->buf + xt_io_descr->bytes - sizeof(*trailer);
+
+    if (req->result < 0) {
+        struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+
+        ++ctx->stats.io_errors;
+        rrd_stat_atomic_add(&global_io_errors, 1);
+        have_read_error = 1;
+        error("%s: uv_fs_read - %s - extent at offset %"PRIu64"(%u) in datafile %u-%u.", __func__,
+              uv_strerror((int)req->result), xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno);
+        goto after_crc_check;
+    }
     crc = crc32(0L, Z_NULL, 0);
     crc = crc32(crc, xt_io_descr->buf, xt_io_descr->bytes - sizeof(*trailer));
     ret = crc32cmp(trailer->checksum, crc);
@@ -66,12 +71,17 @@ void read_extent_cb(uv_fs_t* req)
     }
 #endif
     if (unlikely(ret)) {
-        /* TODO: handle errors */
-        exit(UV_EIO);
-        goto cleanup;
+        struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+
+        ++ctx->stats.io_errors;
+        rrd_stat_atomic_add(&global_io_errors, 1);
+        have_read_error = 1;
+        error("%s: Extent at offset %"PRIu64"(%u) was read from datafile %u-%u. CRC32 check: FAILED", __func__,
+              xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno);
     }
 
-    if (RRD_NO_COMPRESSION != header->compression_algorithm) {
+after_crc_check:
+    if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) {
         uncompressed_payload_length = 0;
         for (i = 0 ; i < count ; ++i) {
             uncompressed_payload_length += header->descr[i].page_length;
@@ -99,7 +109,10 @@ void read_extent_cb(uv_fs_t* req)
             page_offset += header->descr[j].page_length;
         }
         /* care, we don't hold the descriptor mutex */
-        if (RRD_NO_COMPRESSION == header->compression_algorithm) {
+        if (have_read_error) {
+            /* Applications should make sure NULL values match 0 as does SN_EMPTY_SLOT */
+            memset(page, 0, descr->page_length);
+        } else if (RRD_NO_COMPRESSION == header->compression_algorithm) {
             (void) memcpy(page, xt_io_descr->buf + payload_offset + page_offset, descr->page_length);
         } else {
             (void) memcpy(page, uncompressed_buf + page_offset, descr->page_length);
@@ -118,12 +131,11 @@ void read_extent_cb(uv_fs_t* req)
         }
         rrdeng_page_descr_mutex_unlock(ctx, descr);
     }
-    if (RRD_NO_COMPRESSION != header->compression_algorithm) {
+    if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) {
         freez(uncompressed_buf);
     }
     if (xt_io_descr->completion)
         complete(xt_io_descr->completion);
-cleanup:
     uv_fs_req_cleanup(req);
     free(xt_io_descr->buf);
     freez(xt_io_descr);
@@ -246,8 +258,9 @@ void flush_pages_cb(uv_fs_t* req)
 
     xt_io_descr = req->data;
     if (req->result < 0) {
+        ++ctx->stats.io_errors;
+        rrd_stat_atomic_add(&global_io_errors, 1);
         error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result));
-        goto cleanup;
     }
 #ifdef NETDATA_INTERNAL_CHECKS
     {
@@ -279,7 +292,6 @@
     }
     if (xt_io_descr->completion)
         complete(xt_io_descr->completion);
-cleanup:
     uv_fs_req_cleanup(req);
     free(xt_io_descr->buf);
     freez(xt_io_descr);
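The pattern repeated across these dbengine hunks, counting and logging an asynchronous I/O failure instead of calling `fatal()` or `exit()`, reduces to the following minimal stand-alone libuv sketch (illustrative code, not part of the patch):

```
#include <stdio.h>
#include <uv.h>

static unsigned io_errors = 0;  /* stands in for ctx->stats.io_errors */

static void read_cb(uv_fs_t *req) {
    if (req->result < 0) {
        ++io_errors;  /* count and log the failure... */
        fprintf(stderr, "uv_fs_read: %s\n", uv_strerror((int)req->result));
    }
    uv_fs_req_cleanup(req);  /* ...but always clean up and keep the daemon running */
}
```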
diff --git a/database/rrddim.c b/database/rrddim.c
index 088c80d0b..09f364b02 100644
--- a/database/rrddim.c
+++ b/database/rrddim.c
@@ -466,7 +466,7 @@ inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) {
     rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
 }
 
-inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) {
+inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) {
     debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", st->name, rd->name);
 
     rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
@@ -475,7 +475,7 @@ inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) {
 // ----------------------------------------------------------------------------
 // RRDDIM - collect values for a dimension
 
-inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value) {
+inline collected_number rrddim_set_by_pointer(RRDSET *st __maybe_unused, RRDDIM *rd, collected_number value) {
     debug(D_RRD_CALLS, "rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value);
 
     now_realtime_timeval(&rd->last_collected_time);
diff --git a/database/rrdvar.c b/database/rrdvar.c
index 600bd34c4..95ab6859e 100644
--- a/database/rrdvar.c
+++ b/database/rrdvar.c
@@ -68,7 +68,8 @@ inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) {
     freez(rv);
 }
 
-inline RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) {
+inline RRDVAR *rrdvar_create_and_index(const char *scope __maybe_unused, avl_tree_lock *tree, const char *name,
+                                       RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) {
     char *variable = strdupz(name);
     rrdvar_fix_name(variable);
     uint32_t hash = simple_hash(variable);
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
index 7f8008542..4d9c3ba35 100644
--- a/diagrams/netdata-overview.xml
+++ b/diagrams/netdata-overview.xml
@@ -1 +1 @@
[overview diagram regenerated: the old and new versions of this draw.io file are single-line compressed base64 payloads with no human-readable content, omitted here]
diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md
index 0d478d73f..5a2ae5344 100644
--- a/docs/Demo-Sites.md
+++ b/docs/Demo-Sites.md
@@ -7,7 +7,6 @@ Location | Netdata demo URL | 60 mins reqs | VM Donated by
 London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
 Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/)
 Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com)
-Roubaix (France)|**[ventureer.my-netdata.io](https://ventureer.my-netdata.io)**|[![Requests Per Second](https://ventureer.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://ventureer.my-netdata.io)|[Ventureer.com](https://ventureer.com/)
 Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/)
 Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
 Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
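The badge endpoint embedded in these rows is plain HTTP, so any of the demo sites can be probed from the command line. For example, using the London instance and the same query parameters as the table:

```
curl -o netdata-requests.svg "https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&group=sum&label=reqs"
```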
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..8dd0c7a63
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,7 @@
+# Read documentation on https://docs.netdata.cloud
+
+Welcome to the Netdata documentation! While you can read Netdata documentation here, or throughout the Netdata repository, our intention is that these pages are read on [docs.netdata.cloud](https://docs.netdata.cloud).
+
+Links between documentation pages will work fine here, but the formatting may not be perfect, as our documentation site uses a few extra Markdown features that GitHub doesn't support natively. Other things might be missing or look less than perfect.
+
+Now get out there and build an exceptional infrastructure.
\ No newline at end of file
diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md
index a71897f4b..c4def5f6b 100644
--- a/docs/Running-behind-apache.md
+++ b/docs/Running-behind-apache.md
@@ -3,7 +3,7 @@ Below you can find instructions for configuring an apache server to:
 
 1. proxy a single Netdata via an HTTP and HTTPS virtual host
-2. dynamically proxy any number of Netdata
+2. dynamically proxy any number of Netdata servers
 3. add user authentication
 4. adjust Netdata settings to get optimal results
@@ -145,13 +145,15 @@ sudo a2ensite netdata.conf && service apache2 reload
 ## Netdata proxy in Plesk
 
 _Assuming the main goal is to make Netdata run over HTTPS._
+
 1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate
 2. Go to "Apache & nginx Settings", and in the following section, add:
+
 ```
 RewriteEngine on
 RewriteRule (.*) http://localhost:19999/$1 [P,L]
 ```
-3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works.
+3. Optional: If your server is remote, replace "localhost" with your actual hostname or IP; it just works. Repeat the operation for as many servers as you need.
 
@@ -164,6 +166,7 @@ Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get instal
 
 Then, generate a password for the user `netdata`, using `htpasswd -c /etc/apache2/.htpasswd netdata`
 
+**Apache 2.2 Example:**
 Modify the virtual host with these:
 
 ```
@@ -186,6 +189,34 @@ Modify the virtual host with these:
 
 Specify `Location /` if Netdata is running on a dedicated virtual host.
 
+
+**Apache 2.4 (dedicated virtual host) Example:**
+
+```
+<VirtualHost *:80>
+    RewriteEngine On
+    ProxyRequests Off
+    ProxyPreserveHost On
+
+    ServerName netdata.domain.tld
+
+    <Proxy *>
+        AllowOverride None
+        AuthType Basic
+        AuthName "Protected site"
+        AuthUserFile /etc/apache2/.htpasswd
+        Require valid-user
+    </Proxy>
+
+    ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+    ProxyPassReverse "/" "http://localhost:19999/"
+
+    ErrorLog ${APACHE_LOG_DIR}/netdata-error.log
+    CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined
+</VirtualHost>
+```
+
 Note: Changes are applied by reloading or restarting Apache.
 
 # Netdata configuration
 
@@ -230,6 +261,14 @@ You can also use a unix domain socket. This will also provide a faster route bet
 [web]
     bind to = unix:/tmp/netdata.sock
 ```
+
+Apache 2.4.24+ cannot read from `/tmp`, so create your socket in `/var/run/netdata`:
+
+```
+[web]
+    bind to = unix:/var/run/netdata/netdata.sock
+```
+
 _note: Netdata v1.8+ supports unix domain sockets_
 
 At the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netdata.sock|`, like this:
 
@@ -265,6 +304,6 @@ apache logs accesses and Netdata logs them too. You can prevent Netdata from gen
 Make sure the requests reach Netdata, by examining `/var/log/netdata/access.log`.
 
 1. if the requests do not reach Netdata, your apache does not forward them.
-2. if the requests reach Netdata by the URLs are wrong, you have not re-written them properly.
+2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly.
 
 [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
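While testing the rewrite rules above, it helps to watch both ends of the proxy at once. Assuming the log locations used in these examples:

```
tail -f /var/log/netdata/access.log /var/log/apache2/netdata-error.log
```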
diff --git a/docs/Running-behind-haproxy.md b/docs/Running-behind-haproxy.md
new file mode 100644
index 000000000..2c1835f50
--- /dev/null
+++ b/docs/Running-behind-haproxy.md
@@ -0,0 +1,280 @@
+# Netdata via HAProxy
+
+> HAProxy is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic web sites and powers quite a number of the world's most visited ones.
+
+If Netdata is running on a host that also runs HAProxy, then rather than connecting to Netdata on a port number, a domain name can be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. This makes it possible to connect to Netdata at https://example.com or https://example.com/netdata/, which is a much nicer experience than http://example.com:19999.
+
+To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata, the following configuration can be used:
+
+## Default Configuration
+
+For all examples, set the mode to `http`:
+
+```
+defaults
+    mode http
+```
+
+## Simple Configuration
+
+A simple example where the base URL, say http://example.com, is used with no subpath:
+
+### Frontend
+
+Create a frontend to receive the request.
+
+```
+frontend http_frontend
+    ## HTTP ipv4 and ipv6 on all ips ##
+    bind :::80 v4v6
+
+    default_backend netdata_backend
+```
+
+### Backend
+
+Create the Netdata backend, which will send requests to port `19999`.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+## Configuration with subpath
+
+An example where the base URL is used with a subpath `/netdata/`:
+
+### Frontend
+
+To use a subpath, create an ACL, which will set a variable based on the subpath.
+
+```
+frontend http_frontend
+    ## HTTP ipv4 and ipv6 on all ips ##
+    bind :::80 v4v6
+
+    # URL begins with /netdata
+    acl is_netdata url_beg /netdata
+
+    # if trailing slash is missing, redirect to /netdata/
+    http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ }
+
+    ## Backends ##
+    use_backend netdata_backend if is_netdata
+
+    # Other requests go here (optional)
+    # put netdata_backend here if no others are used
+    default_backend www_backend
+```
+
+### Backend
+
+Same as the simple example, except remove `/netdata/` with a regex.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request set-path %[path,regsub(^/netdata/,/)]
+
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
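To verify that the ACL and the trailing-slash redirect behave as intended, probe the frontend with `curl`. HAProxy issues a 302 when no `code` is given, so with a hypothetical host the exchange should look roughly like this:

```
$ curl -sI http://example.com/netdata
HTTP/1.1 302 Found
Location: https://example.com/netdata/
```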
+
+```
+frontend https_frontend
+    ## HTTP ##
+    bind :::80 v4v6
+    # Redirect all HTTP traffic to HTTPS with 301 redirect
+    redirect scheme https code 301 if !{ ssl_fc }
+
+    ## HTTPS ##
+    # Bind to all v4/v6 addresses, use a list of certs in file
+    bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt
+
+    ## ACL ##
+    # Optionally check host for Netdata
+    acl is_example_host hdr_sub(host) -i example.com
+
+    ## Backends ##
+    use_backend netdata_backend if is_example_host
+    # Other requests go here (optional)
+    default_backend www_backend
+```
+
+In the cert list file, place a mapping from a certificate file to the domain used:
+
+`/etc/letsencrypt/certslist.txt`:
+
+```
+example.com /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+The file `/etc/letsencrypt/live/example.com/example.com.pem` should contain the certificate (with its chain) followed by the private key, concatenated into a single `.pem` file, as produced below:
+
+```
+$ cat /etc/letsencrypt/live/example.com/fullchain.pem \
+    /etc/letsencrypt/live/example.com/privkey.pem > \
+    /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+### Backend
+
+Same as the simple example, except the backend also adds an `X-Forwarded-Proto: https` header, so Netdata knows the original request was made over HTTPS.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request add-header X-Forwarded-Proto https
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+## Enable authentication
+
+To use basic HTTP authentication, create an authentication list:
+
+```
+# HTTP Auth
+userlist basic-auth-list
+  group is-admin
+  # Plaintext password
+  user admin password passwordhere groups is-admin
+```
+
+You can create a hashed password using the `mkpasswd` utility:
+
+```
+$ printf "passwordhere" | mkpasswd --stdin --method=sha-256
+$5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1
+```
+
+Replace `passwordhere` with the hash:
+
+```
+user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin
+```
+
+Now add at the top of the backend:
+
+```
+acl devops-auth http_auth_group(basic-auth-list) is-admin
+http-request auth realm netdata_local unless devops-auth
+```
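+
+Before reloading HAProxy with a configuration like the above, it can be validated first (a sketch; the configuration path and systemd unit name are assumptions for a typical package install):
+
+```sh
+# Parse and validate the configuration file without starting the proxy
+sudo haproxy -c -f /etc/haproxy/haproxy.cfg
+# Apply the changes to the running instance
+sudo systemctl reload haproxy
+```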
+
+## Full Example
+
+Full example configuration with HTTP auth over TLS with subpath:
+
+```
+global
+    maxconn 20000
+
+    log /dev/log local0
+    log /dev/log local1 notice
+    user haproxy
+    group haproxy
+    pidfile /run/haproxy.pid
+
+    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+    stats timeout 30s
+    daemon
+
+    tune.ssl.default-dh-param 4096  # Max size of DHE key
+
+    # Default ciphers to use on SSL-enabled listening sockets.
+    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
+    ssl-default-bind-options no-sslv3
+
+defaults
+    log global
+    mode http
+    option httplog
+    option dontlognull
+    timeout connect 5000
+    timeout client 50000
+    timeout server 50000
+    errorfile 400 /etc/haproxy/errors/400.http
+    errorfile 403 /etc/haproxy/errors/403.http
+    errorfile 408 /etc/haproxy/errors/408.http
+    errorfile 500 /etc/haproxy/errors/500.http
+    errorfile 502 /etc/haproxy/errors/502.http
+    errorfile 503 /etc/haproxy/errors/503.http
+    errorfile 504 /etc/haproxy/errors/504.http
+
+frontend https_frontend
+    ## HTTP ##
+    bind :::80 v4v6
+    # Redirect all HTTP traffic to HTTPS with 301 redirect
+    redirect scheme https code 301 if !{ ssl_fc }
+
+    ## HTTPS ##
+    # Bind to all v4/v6 addresses, use a list of certs in file
+    bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt
+
+    ## ACL ##
+    # Optionally check host for Netdata
+    acl is_example_host hdr_sub(host) -i example.com
+    acl is_netdata url_beg  /netdata
+
+    http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ }
+
+    ## Backends ##
+    use_backend netdata_backend if is_example_host is_netdata
+    default_backend www_backend
+
+# HTTP Auth
+userlist basic-auth-list
+    group is-admin
+    # Hashed password
+    user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin
+
+## Default server(s) (optional)##
+backend www_backend
+    mode http
+    balance roundrobin
+    timeout connect 5s
+    timeout server 30s
+    timeout queue 30s
+
+    http-request add-header 'X-Forwarded-Proto: https'
+    server other_server 111.111.111.111:80 check
+
+backend netdata_backend
+    acl devops-auth http_auth_group(basic-auth-list) is-admin
+    http-request auth realm netdata_local unless devops-auth
+
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request set-path %[path,regsub(^/netdata/,/)]
+
+    http-request add-header X-Forwarded-Proto https
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-haproxy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md
index b38d27fa9..81ebc1a75 100644
--- a/docs/Running-behind-nginx.md
+++ b/docs/Running-behind-nginx.md
@@ -1,9 +1,43 @@
-# Netdata via nginx
+# Running Netdata behind Nginx

-To pass Netdata via a nginx, use this:
+## Intro
+
+[Nginx](https://nginx.org/en/) is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server used to host websites and applications of all sizes.
+
+The software is known for its low impact on memory resources, high scalability, and its modular, event-driven architecture, which can offer secure, predictable performance.
+
+## Why Nginx
+
+- Nginx is fast and lightweight out of the box.
+
+- Nginx is useful when you want to access different instances of Netdata from a single server.
+
+- It can password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism.
+
+- Until v1.16.0, which introduced TLS (HTTPS) support, a proxy was also necessary to encrypt the communication to Netdata.
+
+## Nginx configuration file
+
+All Nginx configurations can be found in the `/etc/nginx/` directory. The main configuration file is `/etc/nginx/nginx.conf`. Website or app-specific configurations can be found in the `/etc/nginx/sites-available/` directory.
+
+Configuration options in Nginx are known as directives. Directives are organized into groups known as blocks or contexts. The two terms can be used interchangeably.
+
+Depending on your installation source, you'll find an example configuration file at `/etc/nginx/conf.d/default.conf` or `/etc/nginx/sites-enabled/default`. In some cases, you may have to manually create the `sites-available` and `sites-enabled` directories.
+
+You can edit the Nginx configuration file with Nano, Vim or any other text editor you are comfortable with.
+
+After making changes to the configuration files:
+
+- Test the Nginx configuration with `nginx -t`.
+
+- Restart Nginx to apply the changes with `/etc/init.d/nginx restart` or `service nginx restart`.
+
+## Ways to access Netdata via Nginx

### As a virtual host

+With this method, instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as the `netdata.example.com` used in the configuration below.
+
```
upstream backend {
    # the Netdata server
@@ -30,9 +64,11 @@ server {
    }
}
```
-
### As a subfolder to an existing virtual host

+This method is recommended when Netdata is to be served from a subfolder (or directory).
+In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`.
+
```
upstream netdata {
    server 127.0.0.1:19999;
@@ -69,7 +105,9 @@ server {
}
```

-### As a subfolder for multiple Netdata servers, via one nginx
+### As a subfolder for multiple Netdata servers, via one Nginx
+
+This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via subfolders.

```
upstream backend-server1 {
@@ -114,34 +152,33 @@ Of course you can add as many backend servers as you like.

Using the above, you access Netdata on the backend servers, like this:

-- `http://nginx.server/netdata/server1/` to reach `backend-server1`
-- `http://nginx.server/netdata/server2/` to reach `backend-server2`
-
-### Using TLS communication
+- `http://netdata.example.com/netdata/server1/` to reach `backend-server1`
+- `http://netdata.example.com/netdata/server2/` to reach `backend-server2`

-In case the Netdata web server has been [configured to use TLS](../web/server/#enabling-tls-support),
-you must also encrypt the communication between Nginx and Netdata.
+### Encrypt the communication between Nginx and Netdata

-To enable encryption, first [enable SSL on nginx](http://nginx.org/en/docs/http/configuring_https_servers.html) and then put the following in the location section of the Nginx configuration:
+In case Netdata's web server has been [configured to use TLS](../web/server/#enabling-tls-support), it is necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, append the following parameters in your `nginx.conf`:

```
proxy_set_header X-Forwarded-Proto https;
proxy_pass https://localhost:19999;
```

-If nginx is not configured as described here, you will probably receive the error `SSL_ERROR_RX_RECORD_TOO_LONG`.
+Optionally, it is also possible to [enable TLS/SSL on Nginx](http://nginx.org/en/docs/http/configuring_https_servers.html); this way, not only the communication between Nginx and Netdata is encrypted, but also the communication between the user and Nginx.
+
+If Nginx is not configured as described here, you will probably receive the error `SSL_ERROR_RX_RECORD_TOO_LONG`.

### Enable authentication

-Create an authentication file to enable the nginx basic authentication.
-Do not use authentication without SSL/TLS!
-If you haven't one you can do the following:
+Create an authentication file to enable basic authentication via Nginx; this secures your Netdata dashboard.
+
+If you don't have an authentication file, you can use the following command:

```
printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords
```

-And enable the authentication inside your server directive:
+And then enable the authentication inside your server directive:

```
server {
@@ -152,9 +189,9 @@ server {
}
```

-## limit direct access to Netdata
+## Limit direct access to Netdata

-If your nginx is on `localhost`, you can use this to protect your Netdata:
+If your Nginx is on `localhost`, you can use this to protect your Netdata:

```
[web]
@@ -163,7 +200,7 @@ If your nginx is on `localhost`, you can use this to protect your Netdata:

---

-You can also use a unix domain socket. This will also provide a faster route between nginx and Netdata:
+You can also use a unix domain socket. This will also provide a faster route between Nginx and Netdata:

```
[web]
@@ -171,7 +208,7 @@ You can also use a unix domain socket. This will also provide a faster route bet
```
_note: Netdata v1.8+ support unix domain sockets_

-At the nginx side, use something like this to use the same unix domain socket:
+At the Nginx side, use something like this to use the same unix domain socket:

```
upstream backend {
@@ -182,7 +219,7 @@ upstream backend {

---

-If your nginx server is not on localhost, you can set:
+If your Nginx server is not on localhost, you can set:

```
[web]
@@ -194,9 +231,9 @@ _note: Netdata v1.9+ support `allow connections from`_

`allow connections from` accepts [Netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.

-## prevent the double access.log
+## Prevent the double access.log

-nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:
+Nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:

```
[global]
@@ -205,7 +242,7 @@ nginx logs accesses and Netdata logs them too. You can prevent Netdata from gene

## SELinux

-If you get an 502 Bad Gateway error you might check your nginx error log:
+If you get a 502 Bad Gateway error, check your Nginx error log:

```sh
# cat /var/log/nginx/error.log:
@@ -215,4 +252,4 @@ If you get an 502 Bad Gateway error you might check your nginx error log:

If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`.
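+
+A minimal sketch for checking and applying the boolean (assumes the standard SELinux `policycoreutils` tools are installed):
+
+```sh
+# Show the current value of the boolean
+getsebool httpd_can_network_connect
+# Allow web server processes to make outbound network connections; -P persists across reboots
+sudo setsebool -P httpd_can_network_connect true
+```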
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
\ No newline at end of file
diff --git a/docs/configuration-guide.md b/docs/configuration-guide.md
index 2a9539dca..1c79e0276 100644
--- a/docs/configuration-guide.md
+++ b/docs/configuration-guide.md
@@ -59,7 +59,7 @@ Entire plugins can be turned off from the [netdata.conf [plugins]](../daemon/con

##### Show charts with zero metrics

-By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently.
+By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.

### Modify alarms and notifications
diff --git a/docs/generator/buildhtml.sh b/docs/generator/buildhtml.sh
index e1c108fb5..dbd303911 100755
--- a/docs/generator/buildhtml.sh
+++ b/docs/generator/buildhtml.sh
@@ -27,10 +27,9 @@ find .
-type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o # Copy Netdata html resources cp -a ./${GENERATOR_DIR}/custom ./${SRC_DIR}/ - # Modify the first line of the main README.md, to enable proper static html generation echo "Modifying README header" -sed -i -e '0,/# Netdata /s//# Introduction\n\n/' ${SRC_DIR}/README.md +sed -i -e '0,/# Netdata /s//# Netdata Documentation\n\n/' ${SRC_DIR}/README.md # Remove all GA tracking code find ${SRC_DIR} -name "*.md" -print0 | xargs -0 sed -i -e 's/\[!\[analytics.*UA-64295674-3)\]()//g' @@ -39,7 +38,6 @@ find ${SRC_DIR} -name "*.md" -print0 | xargs -0 sed -i -e 's/\[!\[analytics.*UA- declare -a EXCLUDE_LIST=( "HISTORICAL_CHANGELOG.md" "contrib/sles11/README.md" - "packaging/maintainers/README.md" ) for f in "${EXCLUDE_LIST[@]}"; do @@ -56,13 +54,12 @@ MKDOCS_CONFIG_FILE="${GENERATOR_DIR}/mkdocs.yml" MKDOCS_DIR="doc" DOCS_DIR=${GENERATOR_DIR}/${MKDOCS_DIR} rm -rf ${DOCS_DIR} -mkdir ${DOCS_DIR} prep_html() { lang="${1}" echo "Creating ${lang} mkdocs.yaml" - if [ "${lang}" = "en" ] ; then + if [ "${lang}" == "en" ] ; then SITE_DIR="build" else SITE_DIR="build/${lang}" @@ -86,16 +83,21 @@ prep_html() { if [ "${lang}" != "en" ] ; then find "${GENERATOR_DIR}/${SITE_DIR}" -name "*.html" -print0 | xargs -0 sed -i -e 's/https:\/\/github.com\/netdata\/netdata\/blob\/master\/\S*md/https:\/\/github.com\/netdata\/localization\//g' fi + + # Replace index.html with DOCUMENTATION/index.html. Since we're moving it up one directory, we need to remove ../ from the links + echo "Replacing index.html with DOCUMENTATION/index.html" + sed 's/\.\.\///g' ${GENERATOR_DIR}/${SITE_DIR}/DOCUMENTATION/index.html > ${GENERATOR_DIR}/${SITE_DIR}/index.html + } for d in "en" $(find ${LOC_DIR} -mindepth 1 -maxdepth 1 -name .git -prune -o -type d -printf '%f ') ; do echo "Preparing source for $d" - cp -a ${SRC_DIR}/* ${DOCS_DIR}/ + cp -r ${SRC_DIR} ${DOCS_DIR} if [ "${d}" != "en" ] ; then cp -a ${LOC_DIR}/${d}/* ${DOCS_DIR}/ fi prep_html $d - rm -rf ${DOCS_DIR}/* + rm -rf ${DOCS_DIR} done # Remove cloned projects and temp directories diff --git a/docs/generator/buildyaml.sh b/docs/generator/buildyaml.sh index e4a5466a4..f887c695d 100755 --- a/docs/generator/buildyaml.sh +++ b/docs/generator/buildyaml.sh @@ -48,11 +48,12 @@ navpart() { } echo -e 'site_name: Netdata Documentation +site_url: https://docs.netdata.cloud repo_url: https://github.com/netdata/netdata repo_name: GitHub edit_uri: blob/master site_description: Netdata Documentation -copyright: Netdata, 2018 +copyright: Netdata, 2019 docs_dir: '${docs_dir}' site_dir: '${site_dir}' #use_directory_urls: false @@ -67,6 +68,9 @@ extra: link: "https://www.facebook.com/linuxnetdata/" theme: name: "material" + palette: + primary: "blue grey" + accent: "light green" custom_dir: custom/themes/material favicon: custom/img/favicon.ico language: '${language}' @@ -85,7 +89,6 @@ markdown_extensions: - footnotes - tables - admonition - - codehilite - meta - sane_lists - smarty @@ -99,6 +102,9 @@ markdown_extensions: - pymdownx.caret - pymdownx.critic - pymdownx.details + - pymdownx.highlight: + pygments_style: manni + noclasses: true - pymdownx.inlinehilite - pymdownx.magiclink - pymdownx.mark @@ -117,9 +123,12 @@ markdown_extensions: - pymdownx.extrarawhtml nav:' -navpart 1 . README "About" +navpart 1 . "README" "" -echo -ne " - 'docs/Demo-Sites.md' +navpart 1 . . 
"About Netdata" + +echo -ne " - 'docs/what-is-netdata.md' + - 'docs/Demo-Sites.md' - 'docs/netdata-security.md' - 'docs/anonymous-statistics.md' - 'docs/Donations-netdata-has-received.md' @@ -138,6 +147,7 @@ echo -ne " - 'docs/Demo-Sites.md' - 'packaging/installer/README.md' - 'packaging/docker/README.md' - 'packaging/installer/UPDATE.md' + - 'packaging/DISTRIBUTIONS.md' - 'packaging/installer/UNINSTALL.md' - 'docs/GettingStarted.md' - Running Netdata: @@ -153,6 +163,7 @@ echo -ne " - Running behind another web server: - 'docs/Running-behind-apache.md' - 'docs/Running-behind-lighttpd.md' - 'docs/Running-behind-caddy.md' + - 'docs/Running-behind-haproxy.md' " #navpart 2 system navpart 2 database @@ -253,9 +264,10 @@ navpart 2 web/api/badges "" "" 2 navpart 2 web/api/health "" "" 2 navpart 2 web/api/queries "" "Queries" 2 -echo -ne "- Hacking Netdata: +echo -ne "- Additional Info: - CODE_OF_CONDUCT.md - CONTRIBUTORS.md + - packaging/maintainers/README.md " navpart 2 packaging/makeself "" "" 4 navpart 2 libnetdata "" "libnetdata" 4 diff --git a/docs/generator/checklinks.sh b/docs/generator/checklinks.sh index acc144656..5012ad17d 100755 --- a/docs/generator/checklinks.sh +++ b/docs/generator/checklinks.sh @@ -5,6 +5,8 @@ # Validates and tries to fix all links that will cause issues either in the repo, or in the html site GENERATOR_DIR="docs/generator" +MKDOCS_DIR="doc" +DOCS_DIR=${GENERATOR_DIR}/${MKDOCS_DIR} dbg () { if [ "$VERBOSE" -eq 1 ] ; then printf "%s\\n" "${1}" ; fi @@ -186,25 +188,27 @@ ck_netdata_relative () { fi ;; * ) - if [ -f "$fpath/$rlnk" ] ; then - dbg "-- # (path/someotherfile) $rlnk" - if [ "$fpath" = "." ] ; then - s="https://github.com/netdata/netdata/tree/master/$rlnk" - else - s="https://github.com/netdata/netdata/tree/master/$fpath/$rlnk" + if [ -d "$fpath/$rlnk" ] ; then + dbg "-- # (path) -> htmldoc (path/)" + testf "$f" "$fpath/$rlnk/README.md" + if [ $? -eq 0 ] ; then + s="$rlnk/" + if [ "$fname" != "README.md" ] ; then s="../$s"; fi fi else - if [ -d "$fpath/$rlnk" ] ; then - dbg "-- # (path) -> htmldoc (path/)" - testf "$f" "$fpath/$rlnk/README.md" - if [ $? -eq 0 ] ; then - s="$rlnk/" - if [ "$fname" != "README.md" ] ; then s="../$s"; fi + cd - >/dev/null + if [ -f "$fpath/$rlnk" ] ; then + dbg "-- # (path/someotherfile) $rlnk" + if [ "$fpath" = "." ] ; then + s="https://github.com/netdata/netdata/tree/master/$rlnk" + else + s="https://github.com/netdata/netdata/tree/master/$fpath/$rlnk" fi else echo "-- ERROR: $f - $rlnk is neither a file or a directory. Giving up!" EXITCODE=1 fi + cd $DOCS_DIR >/dev/null fi ;; esac @@ -212,7 +216,7 @@ ck_netdata_relative () { if [[ ! -z $s ]] ; then srch=$(echo "$rlnk" | sed 's/\//\\\//g') rplc=$(echo "$s" | sed 's/\//\\\//g') - fix "sed -i 's/($srch)/($rplc)/g' $GENERATOR_DIR/doc/$f" + fix "sed -i 's/($srch)/($rplc)/g' $f" fi } @@ -314,9 +318,11 @@ if [ -z "${file}" ] ; then printhelp exit 1 fi + cd ${DOCS_DIR} for f in $(find . 
-type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o -name "*.md" -print); do checklinks "$f" done + cd - else if [ $RECURSIVE -eq 1 ] ; then printhelp diff --git a/docs/generator/custom/css/netdata.css b/docs/generator/custom/css/netdata.css index d9003be15..27f1b08cc 100644 --- a/docs/generator/custom/css/netdata.css +++ b/docs/generator/custom/css/netdata.css @@ -5,3 +5,71 @@ .md-typeset { font-size: .75rem } + +/* Underline text */ + +.md-typeset a:not(.nav-button):not(.md-icon):not(.headerlink) { + border-bottom: 1px solid #272b30; +} + +/* Custom styling for the new documentation homepage. + In particular, the three buttons for install/getting started/configuration. */ + +.homepage-nav { + display: flex; + margin-top: 1.4rem; +} + +.homepage-nav div { + flex: 1; +} + +.homepage-nav .nav-install { + margin-right: 1rem; +} + +.homepage-nav .nav-configuration { + margin-left: 1rem; +} + +.nav-button { + border: 2px solid black; + border-radius: 4px; + display: block; + font-weight: 700; + margin: 0 auto; + padding: 0.6rem 0; + text-align: center; +} + +/* Hide the label at the top of the navigation menu. Does nothing. + Well, it does do something on mobile, and this media query makes + sure it's hidden only on screens wide enough to not use the mobile sidebar. */ +@media only screen and (min-width:76.25em) { + .md-nav--primary .md-nav__title { + display: none; + } +} + +/* Change the language selector dropdown to match new color. */ +.md-header-nav select#sel { + background-color: rgba(0,0,0,.26) !important; + padding: 3px; + margin-left: 5px; + margin-right: 20px; +} + +/* Add some whitespace to the bottom of each doc. */ +.md-content { + margin-bottom: 6rem; +} + +/* Make sure inline code in tables doesn't break. */ +.md-typeset__table code { + word-break: normal; +} + +/* Bold the first item on the docs sidebar: Netdata Documentation */ +.md-nav--primary > .md-nav__list > .md-nav__item:first-of-type { + font-weight: 700; +} \ No newline at end of file diff --git a/docs/netdata-security.md b/docs/netdata-security.md index 955abebd8..a905717d9 100644 --- a/docs/netdata-security.md +++ b/docs/netdata-security.md @@ -89,7 +89,7 @@ In Netdata v1.9+ there is also access list support, like this: #### Use an authenticating web server in proxy mode -Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md#netdata-via-nginx), [Apache](Running-behind-apache.md), [lighthttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy). +Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md), [Apache](Running-behind-apache.md), [lighthttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy). 
To use this method, you should firewall-protect all your Netdata servers, so that only the web server IP will be allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager):
diff --git a/docs/what-is-netdata.md b/docs/what-is-netdata.md
new file mode 100644
index 000000000..6664897de
--- /dev/null
+++ b/docs/what-is-netdata.md
@@ -0,0 +1,385 @@
+# What is Netdata?
+
+[![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+
+[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python)
+
+---
+
+**Netdata** is **distributed, real-time, performance and health monitoring for systems and applications**. It is a highly optimized monitoring agent you install on all your systems and containers.
+
+Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third party components, or it can be integrated into existing monitoring tool chains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc).
+
+_Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._
+
+Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**.
+
+---
+
+## How it looks
+
+The following animated image shows the top part of a typical Netdata dashboard.
+
+![peek 2018-11-11 02-40](https://user-images.githubusercontent.com/2662304/48307727-9175c800-e55b-11e8-92d8-a581d60a4889.gif)
+
+*A typical Netdata dashboard, in 1:1 timing. Charts can be panned by dragging them, zoomed in/out with `SHIFT` + `mouse wheel`, and an area can be selected for zoom-in with `SHIFT` + `mouse selection`. Netdata is highly interactive and **real-time**, optimized to get the work done!*
+
+> *We have a few online demos to experience it live: [https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)*
+
+## User base
+
+Netdata is used by hundreds of thousands of users all over the world.
+Check our [GitHub watchers list](https://github.com/netdata/netdata/watchers).
+You will find people working for **Amazon**, **Atos**, **Baidu**, **Cisco Systems**, **Citrix**, **Deutsche Telekom**, **DigitalOcean**,
+**Elastic**, **EPAM Systems**, **Ericsson**, **Google**, **Groupon**, **Hortonworks**, **HP**, **Huawei**,
+**IBM**, **Microsoft**, **NewRelic**, **Nvidia**, **Red Hat**, **SAP**, **Selectel**, **TicketMaster**,
+**Vimeo**, and many more!
+
+### Docker pulls
+We provide docker images for the most common architectures. These are statistics reported by docker hub:
+
+[![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) [![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) [![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/)
+
+### Registry
+When you install multiple Netdata servers, they are integrated into **one distributed application**, via a [Netdata registry](../registry/#registry). This is a web browser feature and it allows us to count the number of unique users and unique Netdata servers installed. The following information comes from the global public Netdata registry we run:
+
+[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+
+*in the last 24 hours:*
[![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+
+## Why Netdata
+
+Netdata has a quite different approach to monitoring.
+
+Netdata is a monitoring agent you install on all your systems. It is:
+
+- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
+- a **time-series database** - all stored in memory (does not touch the disks while it runs)
+- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
+- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
+
+All the above are packaged together in a very flexible, extremely modular, distributed application.
+
+This is how Netdata compares to other monitoring solutions:
+
+Netdata|others (open-source and commercial)
+:---:|:---:
+**High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)
+Monitors everything, **thousands of metrics per node**|Monitor just a few metrics
+UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view
+**Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start
+Install and get results **immediately**|Long preparation is required to get any useful results
+Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*
+**Kills the console** for tracing performance issues|The console is always required for troubleshooting
+Requires **zero dedicated resources**|Require large dedicated resources
+
+Netdata is **open-source**, **free**, super **fast**, very **easy**, completely **open**, extremely **efficient**,
+**flexible** and integrate-able.
+
+It has been designed by **SysAdmins**, **DevOps** and **Developers** for troubleshooting performance problems,
+not just to visualize metrics.
+
+## How it works
+
+Netdata is a highly efficient, highly modular, metrics management engine. Its lockless design makes it ideal for concurrent operations on the metrics.
+
+![image](https://user-images.githubusercontent.com/2662304/48323827-b4c17580-e636-11e8-842c-0ee72fcb4115.png)
+
+This is how it works:
+
+Function|Description|Documentation
+:---:|:---|:---:
+**Collect**|Multiple independent data collection workers collect metrics from their sources, using the optimal protocol for each application, and push the metrics to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](../collectors/#data-collection-plugins)
+**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom-made floating point number for minimal footprint.|[`database`](../database/#database)
+**Check**|A lockless independent watchdog evaluates **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](../health/#health-monitoring)
+**Stream**|A lockless independent worker streams metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](../streaming/#streaming-and-replication)
+**Archive**|A lockless independent worker down-samples the metrics and pushes them to **backend** time-series databases.|[`backends`](../backends/)
+**Query**|Multiple independent workers are attached to the [internal web server](../web/server/#web-server), servicing API requests, including [data queries](../web/api/queries/#database-queries).|[`web/api`](../web/api/#api)
+
+The result is a highly efficient, low latency system, supporting multiple readers and one writer on each metric.
+
+## Infographic
+
+This is a high level overview of the Netdata feature set and architecture.
+Click it to interact with it (it has direct links to documentation).
+
+[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html)
+
+
+## Features
+
+![finger-video](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif)
+
+This is what you should expect from Netdata:
+
+### General
+- **1s granularity** - the highest possible resolution for all metrics.
+- **Unlimited metrics** - collects all the available metrics, the more the better.
+- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
+- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](../database).
+- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
+- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
+- **Zero maintenance** - You just run it, it does the rest.
+- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
+- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoTs. Metrics are not centralized by default, so there is no limit.
+- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have a different metrics retention policy and run with or without health monitoring.
+
+### Health Monitoring & Alarms
+- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, multiple role-based notification methods.
+- **Notifications**: [alerta.io](../health/notifications/alerta/), [amazon sns](../health/notifications/awssns/), [discordapp.com](../health/notifications/discord/), [email](../health/notifications/email/), [flock.com](../health/notifications/flock/), [irc](../health/notifications/irc/), [kavenegar.com](../health/notifications/kavenegar/), [messagebird.com](../health/notifications/messagebird/), [pagerduty.com](../health/notifications/pagerduty/), [prowl](../health/notifications/prowl/), [pushbullet.com](../health/notifications/pushbullet/), [pushover.net](../health/notifications/pushover/), [rocket.chat](../health/notifications/rocketchat/), [slack.com](../health/notifications/slack/), [smstools3](../health/notifications/smstools3/), [syslog](../health/notifications/syslog/), [telegram.org](../health/notifications/telegram/), [twilio.com](../health/notifications/twilio/), [web](../health/notifications/web/) and [custom notifications](../health/notifications/custom/).
+
+### Integrations
+- **time-series dbs** - Netdata can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports the **Prometheus remote write API**, which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+
+## Visualization
+
+- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`.
+- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware.
+- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually.
+- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](../web/gui/confluence/).
+- **Customizable** - custom dashboards can be built using simple HTML (no javascript necessary).
+
+### Positive and negative values
+
+To improve clarity on charts, Netdata dashboards present **positive** values for metrics representing `read`, `input`, `inbound`, `received` and **negative** values for metrics representing `write`, `output`, `outbound`, `sent`.
+
+![positive-and-negative-values](https://user-images.githubusercontent.com/2662304/48309090-7c5c6180-e57a-11e8-8e03-3a7538c14223.gif)
+
+*Netdata charts showing the bandwidth and packets of a network interface. `received` is positive and `sent` is negative.*
+
+### Autoscaled y-axis
+
+Netdata charts automatically zoom vertically, to visualize the variation of each metric within the visible time-frame.
+
+![non-zero-based](https://user-images.githubusercontent.com/2662304/48309139-3d2f1000-e57c-11e8-9a44-b91758134b00.gif)
+
+*A zero-based `stacked` chart automatically switches to an auto-scaled `area` chart when a single dimension is selected.*
+
+### Charts are synchronized
+
+Charts on Netdata dashboards are synchronized to each other. There is no master chart. Any chart can be panned or zoomed at any time, and all other charts will follow.
+
+![charts-are-synchronized](https://user-images.githubusercontent.com/2662304/48309003-b4fb3b80-e578-11e8-86f6-f505c7059c15.gif)
+
+*Charts are panned by dragging them with the mouse.
Charts can be zoomed in/out with `SHIFT` + `mouse wheel` while the mouse pointer is over a chart.*
+
+> The visible time-frame (pan and zoom) is propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
+
+### Highlighted time-frame
+
+To improve visual anomaly detection across charts, the user can highlight a time-frame (by pressing `ALT` + `mouse selection`) on all charts.
+
+![highlighted-timeframe](https://user-images.githubusercontent.com/2662304/48311876-f9093300-e5ae-11e8-9c74-e3e291741990.gif)
+
+*A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts.*
+
+> Highlighted ranges are propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
+
+
+## What does it monitor
+
+Netdata data collection is **extensible** - you can monitor anything you can get a metric for.
+Its [Plugin API](../collectors/plugins.d/) supports all programming languages (anything can be a Netdata plugin: BASH, python, perl, node.js, java, Go, ruby, etc).
+
+- For better performance, most system-related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
+- For faster development and easier contributions, most application-related plugins (databases, web servers, etc) have been written in `python`.
+
+#### APM (Application Performance Monitoring)
+- **[statsd](../collectors/statsd.plugin/)** - Netdata is a fully featured statsd server.
+- **[Go expvar](../collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
+- **[Spring Boot](../collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in the Spring Boot library.
+- **[uWSGI](../collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
+
+#### System Resources
+- **[CPU Utilization](../collectors/proc.plugin/)** - total and per core CPU usage.
+- **[Interrupts](../collectors/proc.plugin/)** - total and per core CPU interrupts.
+- **[SoftIRQs](../collectors/proc.plugin/)** - total and per core SoftIRQs.
+- **[SoftNet](../collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
+- **[CPU Throttling](../collectors/proc.plugin/)** - collects per core CPU throttling.
+- **[CPU Frequency](../collectors/proc.plugin/)** - collects the current CPU frequency.
+- **[CPU Idle](../collectors/proc.plugin/)** - collects the time spent per processor state.
+- **[IdleJitter](../collectors/idlejitter.plugin/)** - measures CPU latency.
+- **[Entropy](../collectors/proc.plugin/)** - random numbers pool, used in cryptography.
+- **[Interprocess Communication - IPC](../collectors/proc.plugin/)** - such as semaphores and semaphore arrays.
+
+#### Memory
+- **[ram](../collectors/proc.plugin/)** - collects info about RAM usage.
+- **[swap](../collectors/proc.plugin/)** - collects info about swap memory usage.
+- **[available memory](../collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes.
+- **[committed memory](../collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes.
+- **[Page Faults](../collectors/proc.plugin/)** - collects the system page faults (major and minor).
+- **[writeback memory](../collectors/proc.plugin/)** - collects the system dirty memory and writeback activity. +- **[huge pages](../collectors/proc.plugin/)** - collects the amount of RAM used for huge pages. +- **[KSM](../collectors/proc.plugin/)** - collects info about Kernel Same Merging (memory dedupper). +- **[Numa](../collectors/proc.plugin/)** - collects Numa info on systems that support it. +- **[slab](../collectors/proc.plugin/)** - collects info about the Linux kernel memory usage. + +#### Disks +- **[block devices](../collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc. +- **[BCACHE](../collectors/proc.plugin/)** - detailed performance of SSD caching devices. +- **[DiskSpace](../collectors/proc.plugin/)** - monitors disk space usage. +- **[mdstat](../collectors/proc.plugin/)** - software RAID. +- **[hddtemp](../collectors/python.d.plugin/hddtemp/)** - disk temperatures. +- **[smartd](../collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values. +- **[device mapper](../collectors/proc.plugin/)** - naming disks. +- **[Veritas Volume Manager](../collectors/proc.plugin/)** - naming disks. +- **[megacli](../collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats. +- **[adaptec_raid](../collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics. +- **[ioping](../collectors/ioping.plugin/)** - to measure disk read/write latency. + +#### Filesystems +- **[BTRFS](../collectors/proc.plugin/)** - detailed disk space allocation and usage. +- **[Ceph](../collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc. +- **[NFS file servers and clients](../collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls +- **[Samba](../collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing. +- **[ZFS](../collectors/proc.plugin/)** - detailed performance and resource usage. + +#### Networking +- **[Network Stack](../collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops). +- **[Netfilter](../collectors/proc.plugin/)** - everything about the netfilter connection tracker. +- **[SynProxy](../collectors/proc.plugin/)** - collects performance data about the linux SYNPROXY (DDoS). +- **[NFacct](../collectors/nfacct.plugin/)** - collects accounting data from iptables. +- **[Network QoS](../collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time +- **[FPing](../collectors/fping.plugin/)** - to measure latency and packet loss between any number of hosts. +- **[ISC dhcpd](../collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc. +- **[AP](../collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`). +- **[SNMP](../collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these). +- **[port_check](../collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time. + +#### Virtual Private Networks +- **[OpenVPN](../collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel. +- **[LibreSwan](../collectors/charts.d.plugin/libreswan/)** - collects metrics per IPSEC tunnel. +- **[Tor](../collectors/python.d.plugin/tor/)** - collects Tor traffic statistics. 
+ +#### Processes +- **[System Processes](../collectors/proc.plugin/)** - running, blocked, forks, active. +- **[Applications](../collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group. +- **[systemd](../collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS. + +#### Users +- **[Users and User Groups resource usage](../collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets +- **[logind](../collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected. + +#### Containers and VMs +- **[Containers](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc). +- **[libvirt VMs](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS. +- **[dockerd](../collectors/python.d.plugin/dockerd/)** - collects docker health metrics. + +#### Web Servers +- **[Apache and lighttpd](../collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers. +- **[IPFS](../collectors/python.d.plugin/ipfs/)** - bandwidth, peers. +- **[LiteSpeed](../collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics. +- **[Nginx](../collectors/python.d.plugin/nginx/)** - `stub-status`, for multiple servers. +- **[Nginx+](../collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics. +- **[PHP-FPM](../collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc. +- **[Tomcat](../collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc. +- **[web server `access.log` files](../collectors/python.d.plugin/web_log/)** - extracting in real-time, web server and proxy performance metrics and applying several health checks, etc. +- **[HTTP check](../collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content. + +#### Proxies, Balancers, Accelerators +- **[HAproxy](../collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc. +- **[Squid](../collectors/python.d.plugin/squid/)** - multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests. +- **[Traefik](../collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime). +- **[Varnish](../collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc. +- **[IPVS](../collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer. + +#### Database Servers +- **[CouchDB](../collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc. +- **[MemCached](../collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc. +- **[MongoDB](../collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc. 
+- **[MySQL and mariadb](../collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more.
+- **[PostgreSQL](../collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more.
+- **[Proxy SQL](../collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics.
+- **[Redis](../collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves.
+- **[RethinkDB](../collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
+
+#### Message Brokers
+- **[beanstalkd](../collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring.
+- **[RabbitMQ](../collectors/python.d.plugin/rabbitmq/)** - performance and health metrics.
+
+#### Search and Indexing
+- **[ElasticSearch](../collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc.
+
+#### DNS Servers
+- **[bind_rndc](../collectors/python.d.plugin/bind_rndc/)** - parses the `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
+- **[dnsdist](../collectors/python.d.plugin/dnsdist/)** - performance and health metrics.
+- **[ISC Bind (named)](../collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics. All versions of bind after 9.9.10 are supported.
+- **[NSD](../collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc.
+- **[PowerDNS](../collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc.
+- **[unbound](../collectors/python.d.plugin/unbound/)** - performance and resource usage metrics.
+- **[dns_query_time](../collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics.
+
+#### Time Servers
+- **[chrony](../collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
+- **[ntpd](../collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optionally also peer variables.
+
+#### Mail Servers
+- **[Dovecot](../collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers.
+- **[Exim](../collectors/python.d.plugin/exim/)** - message queue (emails queued).
+- **[Postfix](../collectors/python.d.plugin/postfix/)** - message queue (entries, size).
+
+#### Hardware Sensors
+- **[IPMI](../collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
+- **[lm-sensors](../collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
+- **[Nvidia](../collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
+- **[RPi](../collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
+- **[w1sensor](../collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
+
+#### UPSes
+- **[apcupsd](../collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
+- **[NUT](../collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
+- **[Linux Power Supply](../collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
+
+#### Social Sharing Servers
+- **[RetroShare](../collectors/python.d.plugin/retroshare/)** - connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
+
+#### Security
+- **[Fail2Ban](../collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
+
+#### Authentication, Authorization, Accounting (AAA, RADIUS, LDAP) Servers
+- **[FreeRadius](../collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
+
+#### Telephony Servers
+- **[opensips](../collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
+
+#### Household Appliances
+- **[SMA webbox](../collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[Fronius](../collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[StiebelEltron](../collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
+
+#### Game Servers
+- **[SpigotMC](../collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
+
+#### Distributed Computing
+- **[BOINC](../collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
+
+#### Media Streaming Servers
+- **[IceCast](../collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
+
+#### Monitoring Systems
+- **[Monit](../collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
+
+#### Provisioning Systems
+- **[Puppet](../collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
+
+You can easily extend Netdata, by writing plugins that collect data from any source, using any computer language.
+
+## Community
+
+We welcome [contributions](../CONTRIBUTING.md). So, feel free to join the team.
+
+To report bugs, or get help, use [GitHub Issues](https://github.com/netdata/netdata/issues).
+
+You can also find Netdata on:
+
+- [Facebook](https://www.facebook.com/linuxnetdata/)
+- [Twitter](https://twitter.com/linuxnetdata)
+- [OpenHub](https://www.openhub.net/p/netdata)
+- [Repology](https://repology.org/metapackage/netdata/versions)
+- [StackShare](https://stackshare.io/netdata)
+
+## License
+
+Netdata is [GPLv3+](../LICENSE).
+
+Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](../REDISTRIBUTED.md).
\ No newline at end of file diff --git a/health/README.md b/health/README.md index 81cc043d0..345f7fc70 100644 --- a/health/README.md +++ b/health/README.md @@ -65,7 +65,7 @@ This line starts an alarm or alarm template. alarm: NAME ``` -or +or ``` template: NAME @@ -161,7 +161,7 @@ The simple pattern syntax and operation is explained in [simple patterns](../lib This line makes a database lookup to find a value. The result of this lookup is available as `$this`. The format is: - + ``` lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS] ``` @@ -311,15 +311,15 @@ delay: [[[up U] [down D] multiplier M] max X] notification for this event will be sent 10 seconds after the actual event. This is used in the hope that the alarm will get back to its previous state within the given duration. The default `U` is zero. - + - `down D` defines the delay to be applied to a notification for an alarm that moves to a lower state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m` will delay the notification by 1 minute. This is used to prevent notifications for flapping alarms. The default `D` is zero. - + - `multiplier M` multiplies `U` and `D` when an alarm changes state, while a notification is delayed. The default multiplier is `1.0`. - + - `max X` defines the maximum absolute notification delay an alarm may get. The default `X` is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`). @@ -361,13 +361,13 @@ repeat: [off] [warning DURATION] [critical DURATION] #### Alarm line `option` -The only possible value for the `option` line is +The only possible value for the `option` line is ``` option: no-clear-notification ``` -For some alarms we need to compare two time-frames to detect anomalies. For example, `health.d/httpcheck.conf` has an alarm template called `web_service_slow` that compares the average http call response time over the last 3 minutes to the average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time passes, the newest window moves into the older, so the average response time of the last hour will keep increasing. Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the issue was not resolved, it's just a matter of the newer data "polluting" the old. For such alarms, it's a good idea to tell Netdata to not clear the notification, by using the `no-clear-notification` option. +For some alarms we need to compare two time-frames to detect anomalies. For example, `health.d/httpcheck.conf` has an alarm template called `web_service_slow` that compares the average http call response time over the last 3 minutes to the average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time passes, the newest window moves into the older, so the average response time of the last hour will keep increasing. Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the issue was not resolved, it's just a matter of the newer data "polluting" the old. 
For such alarms, it's a good idea to tell Netdata to not clear the notification, by using the `no-clear-notification` option. --- @@ -417,14 +417,14 @@ crit: $this > (($status == $CRITICAL) ? (85) : (95)) The above say: * If the alarm is currently a warning, then the threshold for being considered a warning is 75, otherwise it's 85. - + * If the alarm is currently critical, then the threshold for being considered critical is 85, otherwise it's 95. Which in turn, results in the following behavior: * While the value is rising, it will trigger a warning when it exceeds 85, and a critical alert when it exceeds 95. - + * While the value is falling, it will return to a warning state when it goes below 85, and a normal state when it goes below 75. @@ -442,13 +442,13 @@ Which in turn, results in the following behavior: You can find all the variables that can be used for a given chart, using `http://your.netdata.ip:19999/api/v1/alarm_variables?chart=CHART_NAME` Example: [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). - + _Hint: If you don't know how to find the CHART_NAME, you can read about it [here](../docs/Charts.md#charts)._ -Netdata supports 3 internal indexes for variables that will be used in health monitoring. +Netdata supports 3 internal indexes for variables that will be used in health monitoring.
The variables below can be used in both chart alarms and context templates. -Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given context are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families). +Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given context are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families).
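As a side note on the hysteresis expression shown earlier, `crit: $this > (($status == $CRITICAL) ? (85) : (95))` is simply a status-dependent threshold, and the warning expression presumably mirrors it with 75/85. Here is a minimal C sketch of that evaluation logic, using hypothetical names rather than Netdata's internal API:

```c
// A sketch of hysteresis evaluation: the threshold to *stay* in a state
// is lower than the threshold to *enter* it. Names here are hypothetical.
#include <stdio.h>

typedef enum { CLEAR, WARNING, CRITICAL } alarm_status;

static alarm_status evaluate(double value, alarm_status current) {
    double crit = (current == CRITICAL) ? 85 : 95; // crit: $this > (($status == $CRITICAL) ? (85) : (95))
    double warn = (current >= WARNING)  ? 75 : 85; // assumed warning counterpart with 75/85
    if(value > crit) return CRITICAL;
    if(value > warn) return WARNING;
    return CLEAR;
}

int main(void) {
    alarm_status s = CLEAR;
    double samples[] = { 80, 90, 96, 90, 80, 70 }; // a rise followed by a fall
    for(int i = 0; i < 6; i++) {
        s = evaluate(samples[i], s);
        printf("value=%g -> status=%d\n", samples[i], (int)s);
    }
    return 0;
}
```

Running this reproduces the documented behavior: the status rises to WARNING above 85 and to CRITICAL above 95, but on the way down it only returns to WARNING below 85 and to CLEAR below 75.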
- **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of $this for the other configured alarms of the chart also appears, under the name of each configured alarm. @@ -478,13 +478,13 @@ Although the `alarm_variables` link shows you variables for a particular chart, - **special variables** are: - `$this`, which is resolved to the value of the current alarm. - + - `$status`, which is resolved to the current status of the alarm (the current = the last status, i.e. before the current database lookup and the evaluation of the `calc` line). These values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`, `$WARNING`, `$CRITICAL`. These values are incremental, i.e. `$status > $CLEAR` works as expected. - + - `$now`, which is resolved to the current unix timestamp. ## Alarm Statuses @@ -493,16 +493,16 @@ Alarms can have the following statuses: - `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to netdata to reload health configuration) - + - `UNINITIALIZED` - the alarm is not initialized yet - + - `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed, a division by zero occurred, etc.) - + - `CLEAR` - the alarm is not armed / raised (i.e. is OK) - + - `WARNING` - the warning expression resulted in true or non-zero - + - `CRITICAL` - the critical expression resulted in true or non-zero The external script will be called for all status changes. @@ -675,9 +675,6 @@ You can find how netdata interpreted the expressions by examining the alarm at ` ## Disabling health checks or silencing notifications at runtime -The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api). +It's currently not possible to schedule notifications from within the alarm template. For scenarios where you need to temporarily disable notifications (for instance, when a running backup triggers a disk alert), you can disable or silence notifications at runtime. The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api). [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() - - - diff --git a/health/health.d/dbengine.conf b/health/health.d/dbengine.conf index 7a623ba2b..956abf294 100644 --- a/health/health.d/dbengine.conf +++ b/health/health.d/dbengine.conf @@ -22,5 +22,5 @@ every: 10s crit: $this > 0 delay: down 1h multiplier 1.5 max 3h - info: number of IO errors dbengine came across the last 10 minutes (out of space, bad disk etc) + info: number of IO errors dbengine came across in the last 10 minutes (CRC errors, out of space, bad disk, etc.) to: sysadmin diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in index 852718bc9..bbb960918 100755 --- a/health/notifications/alarm-notify.sh.in +++ b/health/notifications/alarm-notify.sh.in @@ -352,6 +352,7 @@ SYSLOG_FACILITY= EMAIL_SENDER= EMAIL_CHARSET=$(locale charmap 2>/dev/null) EMAIL_THREADING= +EMAIL_PLAINTEXT_ONLY= # irc configs IRC_NICKNAME= @@ -2093,16 +2094,7 @@ SENT_SYSLOG=$? 
# ----------------------------------------------------------------------------- # send the email -send_email < +EOF + +send_email <name, e->fullname); continue; } - sprintf(ne.name, "%s[%lu]", e->name, i); - sprintf(ne.fullname, "%s[%lu]", e->fullname, i); + snprintfz(ne.name, JSON_NAME_LEN, "%s[%lu]", e->name, i); + snprintfz(ne.fullname, JSON_FULLNAME_LEN, "%s[%lu]", e->fullname, i); switch(t[start].type) { case JSMN_PRIMITIVE: diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h index 43dc1e04d..ef883300b 100644 --- a/libnetdata/libnetdata.h +++ b/libnetdata/libnetdata.h @@ -81,6 +81,7 @@ #include #include #include +#include #ifdef HAVE_NETINET_IN_H #include @@ -312,5 +313,6 @@ extern char *netdata_configured_host_prefix; #include "url/url.h" #include "json/json.h" #include "health/health.h" +#include "string/utf8.h" #endif // NETDATA_LIB_H diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c index 91e226902..ca9a5aee9 100644 --- a/libnetdata/locks/locks.c +++ b/libnetdata/locks/locks.c @@ -82,7 +82,8 @@ int __netdata_mutex_unlock(netdata_mutex_t *mutex) { return ret; } -int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_init_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -98,7 +99,8 @@ int netdata_mutex_init_debug( const char *file, const char *function, const unsi return ret; } -int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_lock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -114,7 +116,8 @@ int netdata_mutex_lock_debug( const char *file, const char *function, const unsi return ret; } -int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_trylock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -130,7 +133,8 @@ int netdata_mutex_trylock_debug( const char *file, const char *function, const u return ret; } -int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -219,7 +223,8 @@ int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) { } -int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -235,7 +240,8 @@ int netdata_rwlock_destroy_debug( const char *file, const char *function, const return ret; } -int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_init_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line 
__maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -251,7 +257,8 @@ int netdata_rwlock_init_debug( const char *file, const char *function, const uns return ret; } -int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -267,7 +274,8 @@ int netdata_rwlock_rdlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -283,7 +291,8 @@ int netdata_rwlock_wrlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -299,7 +308,8 @@ int netdata_rwlock_unlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_tryrdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -315,7 +325,8 @@ int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, cons return ret; } -int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_trywrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c index 845363fd2..177aebfc0 100644 --- a/libnetdata/popen/popen.c +++ b/libnetdata/popen/popen.c @@ -45,110 +45,91 @@ static void mypopen_del(FILE *fp) { #define PIPE_READ 0 #define PIPE_WRITE 1 -FILE *mypopen(const char *command, volatile pid_t *pidptr) -{ - int pipefd[2]; - - if(pipe(pipefd) == -1) return NULL; +static inline FILE *custom_popene(const char *command, volatile pid_t *pidptr, char **env) { + FILE *fp; + int pipefd[2], error; + pid_t pid; + char *const spawn_argv[] = { + "sh", + "-c", + (char *)command, + NULL + }; + posix_spawnattr_t attr; + posix_spawn_file_actions_t fa; - int pid = fork(); - if(pid == -1) { - close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); + if(pipe(pipefd) == -1) return NULL; + if ((fp = fdopen(pipefd[PIPE_READ], "r")) == NULL) { + goto error_after_pipe; } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - /*mypopen_add(fp, pid);*/ - return(fp); - } - // the child - // close all files + // Mark all files to be closed by the exec() stage of posix_spawn() int i; for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) - if(i != 
STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); - - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); + if(i != STDIN_FILENO && i != STDERR_FILENO) + fcntl(i, F_SETFD, FD_CLOEXEC); + + if (!posix_spawn_file_actions_init(&fa)) { + // move the pipe to stdout in the child + if (posix_spawn_file_actions_adddup2(&fa, pipefd[PIPE_WRITE], STDOUT_FILENO)) { + error("posix_spawn_file_actions_adddup2() failed"); + goto error_after_posix_spawn_file_actions_init; + } + } else { + error("posix_spawn_file_actions_init() failed."); + goto error_after_pipe; } - -#ifdef DETACH_PLUGINS_FROM_NETDATA - // this was an attempt to detach the child and use the suspend mode charts.d - // unfortunatelly it does not work as expected. - - // fork again to become session leader - pid = fork(); - if(pid == -1) - error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid()); - - if(pid != 0) { - // the parent - exit(0); + if (!(error = posix_spawnattr_init(&attr))) { + // reset all signals in the child + sigset_t mask; + + if (posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF)) + error("posix_spawnattr_setflags() failed."); + sigemptyset(&mask); + if (posix_spawnattr_setsigmask(&attr, &mask)) + error("posix_spawnattr_setsigmask() failed."); + } else { + error("posix_spawnattr_init() failed."); } - - // set a new process group id for just this child - if( setpgid(0, 0) != 0 ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid()); - - if( getpgid(0) != getpid() ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. 
Expected %d, found %d", command, getpid(), getpid(), getpgid(0)); - - if( setsid() != 0 ) - error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid()); - - fprintf(stdout, "MYPID %d\n", getpid()); - fflush(NULL); -#endif - - // reset all signals - signals_unblock(); - signals_reset(); - - debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid()); - execl("/bin/sh", "sh", "-c", command, NULL); - exit(1); -} - -FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) { - int pipefd[2]; - - if(pipe(pipefd) == -1) - return NULL; - - int pid = fork(); - if(pid == -1) { + if (!posix_spawn(&pid, "/bin/sh", &fa, &attr, spawn_argv, env)) { + *pidptr = pid; + debug(D_CHILDS, "Spawned command: '%s' on pid %d from parent pid %d.", command, pid, getpid()); + } else { + error("Failed to spawn command: '%s' from parent pid %d.", command, getpid()); close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); - return NULL; + fp = NULL; } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - return(fp); + close(pipefd[PIPE_WRITE]); + + if (!error) { + // posix_spawnattr_init() succeeded + if (posix_spawnattr_destroy(&attr)) + error("posix_spawnattr_destroy"); } - // the child + if (posix_spawn_file_actions_destroy(&fa)) + error("posix_spawn_file_actions_destroy"); + + return fp; + +error_after_posix_spawn_file_actions_init: + if (posix_spawn_file_actions_destroy(&fa)) + error("posix_spawn_file_actions_destroy"); +error_after_pipe: + close(pipefd[PIPE_READ]); + close(pipefd[PIPE_WRITE]); + return NULL; +} - // close all files - int i; - for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) - if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); +// See man environ +extern char **environ; - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); - } +FILE *mypopen(const char *command, volatile pid_t *pidptr) { + return custom_popene(command, pidptr, environ); +} - execle("/bin/sh", "sh", "-c", command, NULL, env); - exit(1); +FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) { + return custom_popene(command, pidptr, env); } int mypclose(FILE *fp, pid_t pid) { diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c index dcbd3f650..ab324a169 100644 --- a/libnetdata/socket/security.c +++ b/libnetdata/socket/security.c @@ -7,8 +7,6 @@ SSL_CTX *netdata_client_ctx=NULL; SSL_CTX *netdata_srv_ctx=NULL; const char *security_key=NULL; const char *security_cert=NULL; -int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL; -int netdata_use_ssl_on_http = NETDATA_SSL_FORCE; //We force SSL due safety reasons int netdata_validate_server = NETDATA_SSL_VALID_CERTIFICATE; /** @@ -20,7 +18,7 @@ int netdata_validate_server = NETDATA_SSL_VALID_CERTIFICATE; * @param where the variable with the flags set. 
* @param ret the return of the caller */ -static void security_info_callback(const SSL *ssl, int where, int ret) { +static void security_info_callback(const SSL *ssl, int where, int ret __maybe_unused) { (void)ssl; if (where & SSL_CB_ALERT) { debug(D_WEB_CLIENT,"SSL INFO CALLBACK %s %s", SSL_alert_type_string(ret), SSL_alert_desc_string_long(ret)); @@ -166,7 +164,7 @@ void security_start_ssl(int selector) { switch (selector) { case NETDATA_SSL_CONTEXT_SERVER: { struct stat statbuf; - if (stat(security_key,&statbuf) || stat(security_cert,&statbuf)) { + if (stat(security_key, &statbuf) || stat(security_cert, &statbuf)) { info("To use encryption it is necessary to set \"ssl certificate\" and \"ssl key\" in [web] !\n"); return; } @@ -176,6 +174,9 @@ void security_start_ssl(int selector) { } case NETDATA_SSL_CONTEXT_STREAMING: { netdata_client_ctx = security_initialize_openssl_client(); + //This is necessary for streaming, because it sometimes works with a non-blocking socket. + //It returns the bitmask after the change; the documentation does not describe any errors. + SSL_CTX_set_mode(netdata_client_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |SSL_MODE_AUTO_RETRY); break; } case NETDATA_SSL_CONTEXT_OPENTSDB: { @@ -185,6 +186,11 @@ void security_start_ssl(int selector) { } } +/** + * Clean OpenSSL + * + * Clean all the contexts allocated by netdata. + */ void security_clean_openssl() { if (netdata_srv_ctx) { @@ -206,6 +212,17 @@ void security_clean_openssl() { #endif } +/** + * Process accept + * + * Process the SSL handshake with the client when it is necessary. + * + * @param ssl is a pointer to the SSL structure + * @param msg is a copy of the first 8 bytes of the initial message received + * + * @return It returns 0 when it performs the handshake, 8 when it is a clean connection, + * and another integer power of 2 otherwise. + */ int security_process_accept(SSL *ssl,int msg) { int sock = SSL_get_fd(ssl); int test; @@ -250,9 +267,18 @@ int security_process_accept(SSL *ssl,int msg) { debug(D_WEB_CLIENT_ACCESS,"SSL Handshake finished %s errno %d on socket fd %d", ERR_error_string((long)SSL_get_error(ssl, test), NULL), errno, sock); } - return 0; + return NETDATA_SSL_HANDSHAKE_COMPLETE; } +/** + * Test Certificate + * + * Check the certificate of the Netdata master + * + * @param ssl is the connection structure + * + * @return It returns 0 on success and -1 otherwise + */ int security_test_certificate(SSL *ssl) { X509* cert = SSL_get_peer_certificate(ssl); int ret; @@ -271,7 +297,48 @@ int security_test_certificate(SSL *ssl) { } else { ret = 0; } + return ret; } +/** + * Location for context + * + * If the user gives us a directory with the available certificates and + * the Netdata master certificate, we use this function to validate the certificate. + * + * @param ctx the context where the path will be set. + * @param file the file with the Netdata master certificate. + * @param path the directory where the certificates are stored. + * + * @return It returns 0 on success and -1 otherwise. 
+ */ +int security_location_for_context(SSL_CTX *ctx, char *file, char *path) { + struct stat statbuf; + if (stat(file, &statbuf)) { + info("Netdata does not have a SSL master certificate, so it will use the default OpenSSL configuration to validate certificates!"); + return 0; + } + + ERR_clear_error(); + u_long err; + char buf[256]; + if(!SSL_CTX_load_verify_locations(ctx, file, path)) { + goto slfc; + } + + if(!SSL_CTX_set_default_verify_paths(ctx)) { + goto slfc; + } + + return 0; + +slfc: + while ((err = ERR_get_error()) != 0) { + ERR_error_string_n(err, buf, sizeof(buf)); + error("Cannot set the directory for the certificates and the master SSL certificate: %s",buf); + } + return -1; +} + #endif diff --git a/libnetdata/socket/security.h b/libnetdata/socket/security.h index 8beb9672f..697e0fda1 100644 --- a/libnetdata/socket/security.h +++ b/libnetdata/socket/security.h @@ -25,7 +25,7 @@ struct netdata_ssl{ SSL *conn; //SSL connection - int flags; + int flags; //The flags for SSL connection }; extern SSL_CTX *netdata_opentsdb_ctx; @@ -33,9 +33,8 @@ extern SSL_CTX *netdata_client_ctx; extern SSL_CTX *netdata_srv_ctx; extern const char *security_key; extern const char *security_cert; -extern int netdata_use_ssl_on_stream; -extern int netdata_use_ssl_on_http; extern int netdata_validate_server; +extern int security_location_for_context(SSL_CTX *ctx,char *file,char *path); void security_openssl_library(); void security_clean_openssl(); diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c index 282710081..22abb47f4 100644 --- a/libnetdata/socket/socket.c +++ b/libnetdata/socket/socket.c @@ -301,39 +301,47 @@ void listen_sockets_close(LISTEN_SOCKETS *sockets) { sockets->failed = 0; } -WEB_CLIENT_ACL socket_ssl_acl(char *ssl) { +/* + * SSL ACL + * + * Search the SSL acl and apply it case it is set. + * + * @param acl is the acl given by the user. 
+ */ +WEB_CLIENT_ACL socket_ssl_acl(char *acl) { + char *ssl = strchr(acl,'^'); + if(ssl) { + //Due the format of the SSL command it is always the last command, + //we finish it here to avoid problems with the ACLs + *ssl = '\0'; #ifdef ENABLE_HTTPS - if (!strcmp(ssl,"optional")) { - netdata_use_ssl_on_http = NETDATA_SSL_OPTIONAL; - return WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; - } - else if (!strcmp(ssl,"force")) { - netdata_use_ssl_on_stream = NETDATA_SSL_FORCE; - return WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; - } + ssl++; + if (!strncmp("SSL=",ssl,4)) { + ssl += 4; + if (!strcmp(ssl,"optional")) { + return WEB_CLIENT_ACL_SSL_OPTIONAL; + } + else if (!strcmp(ssl,"force")) { + return WEB_CLIENT_ACL_SSL_FORCE; + } + } #endif + } return WEB_CLIENT_ACL_NONE; } WEB_CLIENT_ACL read_acl(char *st) { - char *ssl = strchr(st,'^'); - if (ssl) { - ssl++; - if (!strncmp("SSL=",ssl,4)) { - ssl += 4; - } - socket_ssl_acl(ssl); - } + WEB_CLIENT_ACL ret = socket_ssl_acl(st); - if (!strcmp(st,"dashboard")) return WEB_CLIENT_ACL_DASHBOARD; - if (!strcmp(st,"registry")) return WEB_CLIENT_ACL_REGISTRY; - if (!strcmp(st,"badges")) return WEB_CLIENT_ACL_BADGE; - if (!strcmp(st,"management")) return WEB_CLIENT_ACL_MGMT; - if (!strcmp(st,"streaming")) return WEB_CLIENT_ACL_STREAMING; - if (!strcmp(st,"netdata.conf")) return WEB_CLIENT_ACL_NETDATACONF; + if (!strcmp(st,"dashboard")) ret |= WEB_CLIENT_ACL_DASHBOARD; + if (!strcmp(st,"registry")) ret |= WEB_CLIENT_ACL_REGISTRY; + if (!strcmp(st,"badges")) ret |= WEB_CLIENT_ACL_BADGE; + if (!strcmp(st,"management")) ret |= WEB_CLIENT_ACL_MGMT; + if (!strcmp(st,"streaming")) ret |= WEB_CLIENT_ACL_STREAMING; + if (!strcmp(st,"netdata.conf")) ret |= WEB_CLIENT_ACL_NETDATACONF; - return socket_ssl_acl(st); + return ret; } static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) { @@ -375,7 +383,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, error("LISTENER: Cannot create unix socket '%s'", path); sockets->failed++; } else { - acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT; listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0, acl_flags); added++; } @@ -425,7 +433,13 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, } acl_flags |= read_acl(portconfig); } else { - acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT; + } + + //Case the user does not set the option SSL in the "bind to", but he has + //the certificates, I must redirect, so I am assuming here the default option + if(!(acl_flags & WEB_CLIENT_ACL_SSL_OPTIONAL) && !(acl_flags & WEB_CLIENT_ACL_SSL_FORCE)) { + acl_flags |= 
WEB_CLIENT_ACL_SSL_DEFAULT; } uint32_t scope_id = 0; diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h index 9ea83bcc0..76b15def5 100644 --- a/libnetdata/socket/socket.h +++ b/libnetdata/socket/socket.h @@ -17,7 +17,10 @@ typedef enum web_client_acl { WEB_CLIENT_ACL_BADGE = 1 << 2, WEB_CLIENT_ACL_MGMT = 1 << 3, WEB_CLIENT_ACL_STREAMING = 1 << 4, - WEB_CLIENT_ACL_NETDATACONF = 1 << 5 + WEB_CLIENT_ACL_NETDATACONF = 1 << 5, + WEB_CLIENT_ACL_SSL_OPTIONAL = 1 << 6, + WEB_CLIENT_ACL_SSL_FORCE = 1 << 7, + WEB_CLIENT_ACL_SSL_DEFAULT = 1 << 8 } WEB_CLIENT_ACL; #define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD) @@ -26,6 +29,9 @@ typedef enum web_client_acl { #define web_client_can_access_mgmt(w) ((w)->acl & WEB_CLIENT_ACL_MGMT) #define web_client_can_access_stream(w) ((w)->acl & WEB_CLIENT_ACL_STREAMING) #define web_client_can_access_netdataconf(w) ((w)->acl & WEB_CLIENT_ACL_NETDATACONF) +#define web_client_is_using_ssl_optional(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_OPTIONAL) +#define web_client_is_using_ssl_force(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_FORCE) +#define web_client_is_using_ssl_default(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_DEFAULT) typedef struct listen_sockets { struct config *config; // the config file to use diff --git a/libnetdata/string/utf8.h b/libnetdata/string/utf8.h new file mode 100644 index 000000000..133ec710b --- /dev/null +++ b/libnetdata/string/utf8.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STRING_UTF8_H +#define NETDATA_STRING_UTF8_H 1 + +#define IS_UTF8_BYTE(x) (x & 0x80) +#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x)&&(x & 0x40)) + +#endif /* NETDATA_STRING_UTF8_H */ diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c index 07a9f8069..7df9faaf0 100644 --- a/libnetdata/url/url.c +++ b/libnetdata/url/url.c @@ -43,8 +43,16 @@ char *url_encode(char *str) { return pbuf; } -/* Returns a url-decoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ +/** + * URL Decode + * + * Returns a url-decoded version of str + * IMPORTANT: be sure to free() the returned string after use + * + * @param str the string that will be decoded + * + * @return a pointer to the decoded string. + */ char *url_decode(char *str) { size_t size = strlen(str) + 1; @@ -52,6 +60,149 @@ char *url_decode(char *str) { return url_decode_r(buf, str, size); } +/** + * Percentage escape decode + * + * Decode a %XX escaped character, or return 0 if it cannot be decoded + * + * @param s the string to decode + * + * @return The character decoded on success and 0 otherwise + */ +char url_percent_escape_decode(char *s) { + if(likely(s[1] && s[2])) + return from_hex(s[1]) << 4 | from_hex(s[2]); + return 0; +} + +/** + * Get byte length + * + * This (utf8 string related) helper should be moved to a separate file in the future + * + * @param c is the utf8 character + * + * @return It returns the byte length of the character (or -1 when it is not valid). 
+ */ +char url_utf8_get_byte_length(char c) { + if(!IS_UTF8_BYTE(c)) + return 1; + + char length = 0; + while(likely(c & 0x80)) { + length++; + c <<= 1; + } + //4 byte is max size for UTF-8 char + //10XX XXXX is not valid character -> check length == 1 + if(length > 4 || length == 1) + return -1; + + return length; +} + +/** + * Decode Multibyte UTF8 + * + * Decode % encoded UTF-8 characters and copy them to *d + * + * @param s first address + * @param d + * @param d_end last address + * + * @return count of bytes written to *d + */ +char url_decode_multibyte_utf8(char *s, char *d, char *d_end) { + char first_byte = url_percent_escape_decode(s); + + if(unlikely(!first_byte || !IS_UTF8_STARTBYTE(first_byte))) + return 0; + + char byte_length = url_utf8_get_byte_length(first_byte); + + if(unlikely(byte_length <= 0 || d+byte_length >= d_end)) + return 0; + + char to_read = byte_length; + while(to_read > 0) { + char c = url_percent_escape_decode(s); + + if(unlikely( !IS_UTF8_BYTE(c) )) + return 0; + if((to_read != byte_length) && IS_UTF8_STARTBYTE(c)) + return 0; + + *d++ = c; + s+=3; + to_read--; + } + + return byte_length; +} + +/* + * The utf8_check() function scans the '\0'-terminated string starting + * at s. It returns a pointer to the first byte of the first malformed + * or overlong UTF-8 sequence found, or NULL if the string contains + * only correct UTF-8. It also spots UTF-8 sequences that could cause + * trouble if converted to UTF-16, namely surrogate characters + * (U+D800..U+DFFF) and non-Unicode positions (U+FFFE..U+FFFF). This + * routine is very likely to find a malformed sequence if the input + * uses any other encoding than UTF-8. It therefore can be used as a + * very effective heuristic for distinguishing between UTF-8 and other + * encodings. + * + * Markus Kuhn -- 2005-03-30 + * License: http://www.cl.cam.ac.uk/~mgk25/short-license.html + */ +unsigned char *utf8_check(unsigned char *s) +{ + while (*s) + { + if (*s < 0x80) + /* 0xxxxxxx */ + s++; + else if ((s[0] & 0xe0) == 0xc0) + { + /* 110XXXXx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[0] & 0xfe) == 0xc0) /* overlong? */ + return s; + else + s += 2; + } + else if ((s[0] & 0xf0) == 0xe0) + { + /* 1110XXXX 10Xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */ + (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */ + (s[0] == 0xef && s[1] == 0xbf && + (s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */ + return s; + else + s += 3; + } + else if ((s[0] & 0xf8) == 0xf0) + { + /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[3] & 0xc0) != 0x80 || + (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */ + (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? 
*/ + return s; + else + s += 4; + } + else + return s; + } + + return NULL; +} + char *url_decode_r(char *to, char *url, size_t size) { char *s = url, // source *d = to, // destination @@ -59,12 +210,24 @@ char *url_decode_r(char *to, char *url, size_t size) { while(*s && d < e) { if(unlikely(*s == '%')) { - if(likely(s[1] && s[2])) { - char t = from_hex(s[1]) << 4 | from_hex(s[2]); + char t = url_percent_escape_decode(s); + if(IS_UTF8_BYTE(t)) { + char bytes_written = url_decode_multibyte_utf8(s, d, e); + if(likely(bytes_written)){ + d += bytes_written; + s += (bytes_written * 3)-1; + } + else { + goto fail_cleanup; + } + } + else if(likely(t) && isprint(t)) { // avoid HTTP header injection - *d++ = (char)((isprint(t))? t : ' '); + *d++ = t; s += 2; } + else + goto fail_cleanup; } else if(unlikely(*s == '+')) *d++ = ' '; @@ -77,5 +240,166 @@ char *url_decode_r(char *to, char *url, size_t size) { *d = '\0'; + if(unlikely( utf8_check((unsigned char *)to) )) //NULL means success here + return NULL; + return to; + +fail_cleanup: + *d = '\0'; + return NULL; +} + +/** + * Is request complete? + * + * Check whether the request is complete. + * This function cannot check all request METHODS; for example, it will fail when you are working with POST. + * + * @param begin is the first character of the sequence to analyse. + * @param end is the last character of the sequence. + * @param length is the total number of bytes read; it is not the difference between end and begin. + * + * @return It returns 1 when the request is complete and 0 otherwise. + */ +inline int url_is_request_complete(char *begin, char *end, size_t length) { + + if ( begin == end) { + //Message cannot be complete when first and last address are the same + return 0; + } + + //Check only the last 4 bytes for the terminator, because we are discarding the POST body + if (length > 4) { + begin = end - 4; + } + + return (strstr(begin, "\r\n\r\n"))?1:0; +} + +/** + * Find protocol + * + * Search for the string ' HTTP/' in the message given. + * + * @param s is the start of the user request. + * @return It returns a pointer to the ' HTTP/' token, or to the string terminator when it is not found. + */ +inline char *url_find_protocol(char *s) { + while(*s) { + // find the next space + while (*s && *s != ' ') s++; + + // is it SPACE + "HTTP/" ? + if(*s && !strncmp(s, " HTTP/", 6)) break; + else s++; + } + + return s; +} + +/** + * Map query string + * + * Map the query string fields that will be decoded. + * This function must be called after checking for the presence of a query string; + * here we assume that you have already tested this. + * + * @param out the array of pointers that will be used to map the fields + * @param url the input url that we are decoding. + * + * @return It returns the total number of variables in the query string. + */ +int url_map_query_string(char **out, char *url) { + (void)out; + (void)url; + int count = 0; + + //First we try to parse assuming the URL was not percent-encoded + char *moveme = url; + char *ptr; + + //We always have at least one field here, so we can set this. 
+ out[count++] = moveme; + while(moveme) { + ptr = strchr((moveme+1), '&'); + if(ptr) { + out[count++] = ptr; + } + + moveme = ptr; + } + + //I could not find any '&', so I am assuming now it is like '%26' + if (count == 1) { + moveme = url; + while(moveme) { + ptr = strchr((moveme+1), '%'); + if(ptr) { + char *test = (ptr+1); + if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) { + out[count++] = ptr; + } + } + moveme = ptr; + } + } + + return count; +} + +/** + * Parse query string + * + * Parse the query string mapped and store it inside output. + * + * @param output is a vector where I will store the string. + * @param max is the maximum length of the output + * @param map the map done by the function url_map_query_string. + * @param total the total number of variables inside map + * + * @return It returns 0 on success and -1 otherwise + */ +int url_parse_query_string(char *output, size_t max, char **map, int total) { + if(!total) { + return 0; + } + + int counter, next; + size_t length; + char *end; + char *begin = map[0]; + char save; + size_t copied = 0; + for(counter = 0, next=1 ; next <= total ; ++counter, ++next) { + if (next != total) { + end = map[next]; + length = (size_t) (end - begin); + save = *end; + *end = 0x00; + } else { + length = strlen(begin); + end = NULL; + } + length++; + + if (length > (max - copied)) { + error("Parsing query string: we cannot parse a query string so big"); + break; + } + + if(!url_decode_r(output, begin, length)) { + return -1; + } + length = strlen(output); + copied += length; + output += length; + + begin = end; + if (begin) { + *begin = save; + } + } + + return 0; } diff --git a/libnetdata/url/url.h b/libnetdata/url/url.h index 6cef6d7a8..10f3fe176 100644 --- a/libnetdata/url/url.h +++ b/libnetdata/url/url.h @@ -25,4 +25,11 @@ extern char *url_decode(char *str); extern char *url_decode_r(char *to, char *url, size_t size); +#define WEB_FIELDS_MAX 400 +extern int url_map_query_string(char **out, char *url); +extern int url_parse_query_string(char *output, size_t max, char **map, int total); + +extern int url_is_request_complete(char *begin,char *end,size_t length); +extern char *url_find_protocol(char *s); + #endif /* NETDATA_URL_H */ diff --git a/netdata-installer.sh b/netdata-installer.sh index a0c3f828a..a79f41c15 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -199,6 +199,7 @@ NETDATA_PREFIX= LIBS_ARE_HERE=0 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}" RELEASE_CHANNEL="nightly" +IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY:-"no"}" while [ -n "${1}" ]; do case "${1}" in "--zlib-is-really-here") LIBS_ARE_HERE=1;; @@ -984,8 +985,13 @@ fi # ----------------------------------------------------------------------------- progress "Copy uninstaller" -sed "s|ENVIRONMENT_FILE=\"/etc/netdata/.environment\"|ENVIRONMENT_FILE=\"${NETDATA_PREFIX}/etc/netdata/.environment\"|" packaging/installer/netdata-uninstaller.sh > ${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh -chmod 750 ${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh +if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh ]; then + echo >&2 "Removing uninstaller from old location" + rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh; +fi + +sed "s|ENVIRONMENT_FILE=\"/etc/netdata/.environment\"|ENVIRONMENT_FILE=\"${NETDATA_PREFIX}/etc/netdata/.environment\"|" packaging/installer/netdata-uninstaller.sh > ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh +chmod 750 
${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh # ----------------------------------------------------------------------------- progress "Basic netdata instructions" @@ -1006,19 +1012,24 @@ To start netdata run: ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_START_CMD}${TPUT_RESET} END -echo >&2 "Uninstall script copied to: ${TPUT_RED}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh${TPUT_RESET}" +echo >&2 "Uninstall script copied to: ${TPUT_RED}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh${TPUT_RESET}" echo >&2 progress "Install netdata updater tool" +if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh ]; then + echo >&2 "Removing updater from previous location" + rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh +fi + if [ -f "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" ]; then - sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" || exit 1 + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || exit 1 else - sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" || exit 1 + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || exit 1 fi -chmod 0755 ${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh -echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh${TPUT_RESET}" +chmod 0755 ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh +echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh${TPUT_RESET}" echo >&2 # Figure out the cron directory for the distro @@ -1042,7 +1053,7 @@ else echo >&2 "Adding to cron" rm -f "${crondir}/netdata-updater" - ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" "${crondir}/netdata-updater" + ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" "${crondir}/netdata-updater" echo >&2 "Auto-updating has been enabled. 
Updater script linked to: ${TPUT_RED}${TPUT_BOLD}${crondir}/netdata-updater${TPUT_RESET}" echo >&2 @@ -1061,7 +1072,10 @@ else fi fi +progress "Wrap up environment set up" + # Save environment variables +echo >&2 "Preparing .environment file" cat <<EOF > "${NETDATA_USER_CONFIG_DIR}/.environment" # Created by installer PATH="${PATH}" @@ -1073,10 +1087,14 @@ INSTALL_UID="${UID}" NETDATA_GROUP="${NETDATA_GROUP}" REINSTALL_COMMAND="${REINSTALL_COMMAND}" RELEASE_CHANNEL="${RELEASE_CHANNEL}" -# This value is meant to be populated by autoupdater (if enabled) -NETDATA_TARBALL_CHECKSUM="new_installation" +IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY}" +NETDATA_LIB_DIR="${NETDATA_LIB_DIR}" EOF +echo >&2 "Setting netdata.tarball.checksum to 'new_installation'" +cat <<EOF > "${NETDATA_LIB_DIR}/netdata.tarball.checksum" +new_installation +EOF # ----------------------------------------------------------------------------- echo >&2 diff --git a/netdata.spec.in b/netdata.spec.in index 25b5f9a46..d686906f4 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -92,6 +92,7 @@ URL: http://my-netdata.io # Build dependencies # BuildRequires: gcc +BuildRequires: gcc-c++ BuildRequires: make BuildRequires: git BuildRequires: autoconf @@ -190,6 +191,26 @@ BuildRequires: cups-devel Requires: cups # end - cups plugin dependencies +# Prometheus remote write dependencies +BuildRequires: snappy-devel +BuildRequires: protobuf-devel +%if 0%{?suse_version} +BuildRequires: libprotobuf-c-devel +%else +BuildRequires: protobuf-c-devel +%endif + +%if 0%{?suse_version} +Requires: libsnappy1 +Requires: protobuf-c +Requires: libprotobuf15 +%else +Requires: snappy +Requires: protobuf-c +Requires: protobuf +%endif +# end - prometheus remote write dependencies + # ##################################################################### # End of dependency management configuration # ##################################################################### diff --git a/packaging/DISTRIBUTIONS.md b/packaging/DISTRIBUTIONS.md new file mode 100644 index 000000000..d180f25f6 --- /dev/null +++ b/packaging/DISTRIBUTIONS.md @@ -0,0 +1,37 @@ +# Netdata distribution support matrix +![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true) + +In the following table we've listed Netdata's officially supported operating systems. We detail the distributions, flavors, and the level of support Netdata is currently capable of providing. + +The following table is a work in progress. We have finalized the list of distributions +that we currently support, and we are working on documenting our current state so that our users +have complete visibility over the range of support. 
+ +Distribution | Family | Architecture | Code health | Installer support | Kickstart support | Binary packaging support | Integrity testing (CI) | Functionality testing (CI) | Community support +:------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :-------------------- +14.04.6 LTS (Trusty Tahr) | Ubuntu | | | | | | | | +16.04.6 LTS (Xenial Xerus) | Ubuntu | | | | | | | | +18.04.2 LTS (Bionic Beaver) | Ubuntu | | | | | | | | +19.04 (Disco Dingo) Latest | Ubuntu | | | | | | | | +Debian 7 (Wheezy) | Debian | | | | | | | | +Debian 8 (Jessie) | Debian | | | | | | | | +Debian 9 (Stretch) | Debian | | | | | | | | +Debian 10 (Buster) | Debian | | | | | | | | +Versions 6.* | RHEL | | | | | | | | +Versions 7.* | RHEL | | | | | | | | +Versions 8.* | RHEL | | | | | | | | +Fedora 28 | Fedora | | | | | | | | +Fedora 29 | Fedora | | | | | | | | +Fedora 30 | Fedora | | | | | | | | +Fedora 31 | Fedora | | | | | | | | +CentOS 6.* | Cent OS | | | | | | | | | +CentOS 7.* | Cent OS | | | | | | | | | +CentOS 8.* | Cent OS | | | | | | | | | +Open SuSE Leap 15.0 | Open SuSE | | | | | | | | +Open SuSE Leap 15.1 | Open SuSE | | | | | | | | +Open SuSE Tumbleweed (latest) | Open SuSE | | | | | | | | +SuSE Enterprise Linux Server 11 | SLES | | | | | | | | +SuSE Enterprise Linux Server 12 | SLES | | | | | | | | +SuSE Enterprise Linux Server 15 | SLES | | | | | | | | +Arch Linux (latest) | Arch | | | | | | | | +All other linux | Other | | | | | | | | diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 98fdce5c9..4be2d93b2 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -58,9 +58,11 @@ COPY --from=builder /app / # Configure system ARG NETDATA_UID=201 ARG NETDATA_GID=201 +ENV DOCKER_GRP netdata +ENV DOCKER_USR netdata RUN \ # provide judy installation to base image - apk add make alpine-sdk && \ + apk add make alpine-sdk shadow && \ cd /judy-${JUDY_VER} && make install && cd / && \ # Clean the source stuff once judy is installed rm -rf /judy-${JUDY_VER} && apk del make alpine-sdk && \ @@ -69,8 +71,8 @@ RUN \ chmod 4755 /usr/local/bin/fping && \ mkdir -p /var/log/netdata && \ # Add netdata user - addgroup -g ${NETDATA_GID} -S netdata && \ - adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G netdata netdata && \ + addgroup -g ${NETDATA_GID} -S "${DOCKER_GRP}" && \ + adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G "${DOCKER_GRP}" "${DOCKER_USR}" && \ # Apply the permissions as described in # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories chown -R root:netdata /etc/netdata && \ diff --git a/packaging/docker/README.md b/packaging/docker/README.md index 0bf416cd4..4e21918ec 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -24,9 +24,10 @@ This is good for an internal network or to quickly analyse a host. 
```bash docker run -d --name=netdata \ -p 19999:19999 \ + -v /etc/passwd:/host/etc/passwd:ro \ + -v /etc/group:/host/etc/group:ro \ -v /proc:/host/proc:ro \ -v /sys:/host/sys:ro \ - -v /var/run/docker.sock:/var/run/docker.sock:ro \ --cap-add SYS_PTRACE \ --security-opt apparmor=unconfined \ netdata/netdata @@ -47,35 +48,57 @@ services: security_opt: - apparmor:unconfined volumes: + - /etc/passwd:/host/etc/passwd:ro + - /etc/group:/host/etc/group:ro - /proc:/host/proc:ro - /sys:/host/sys:ro - - /var/run/docker.sock:/var/run/docker.sock:ro ``` +If you don't want to use the apps.plugin functionality, you can remove the mounts of `/etc/passwd` and `/etc/group` (they are used to get proper user and group names for the monitored host) to get slightly better security. + ### Docker container names resolution -If you want to have your container names resolved by netdata, you need to do two things: -1) Make netdata user be part of the group that owns the socket. - To achieve that just add environment variable `PGID=[GROUP NUMBER]` to the netdata container, - where `[GROUP NUMBER]` is practically the group id of the group assigned to the docker socket, on your host. - This group number can be found by running the following (if socket group ownership is docker): - ```bash - grep docker /etc/group | cut -d ':' -f 3 - ``` - -2) Change docker socket access level to read/write like so: - from - ``` - /var/run/docker.sock:/var/run/docker.sock:ro - ``` - - change to - ``` - /var/run/docker.sock:/var/run/docker.sock:rw - ``` +There are a few options for resolving container names within netdata. Some methods of doing so will allow root access to your machine from within the container. Please read the following carefully. + +#### Docker Socket Proxy (Safest Option) + +Deploy a Docker socket proxy that accepts and filter out requests using something like [HAProxy](https://docs.netdata.cloud/docs/running-behind-haproxy/) so that it restricts connections to read-only access to the CONTAINERS endpoint. + +The reason it's safer to expose the socket to the proxy is because netdata has a TCP port exposed outside the Docker network. Access to the proxy container is limited to only within the network. + +#### Giving group access to Docker Socket (Less safe) **Important Note**: You should seriously consider the necessity of activating this option, -as it grants to the netdata user access to the privileged socket connection of docker service +as it grants to the netdata user access to the privileged socket connection of docker service and therefore your whole machine. + +If you want to have your container names resolved by Netdata, make the `netdata` user be part of the group that owns the socket. + +To achieve that just add environment variable `PGID=[GROUP NUMBER]` to the Netdata container, +where `[GROUP NUMBER]` is practically the group id of the group assigned to the docker socket, on your host. + +This group number can be found by running the following (if socket group ownership is docker): + +```bash +grep docker /etc/group | cut -d ':' -f 3 +``` + +#### Running as root (Unsafe) + +**Important Note**: You should seriously consider the necessity of activating this option, +as it grants to the netdata user access to the privileged socket connection of docker service and therefore your whole machine. + +```yaml +version: '3' +services: + netdata: + image: netdata/netdata + # ... rest of your config ... + volumes: + # ... other volumes ... 
+ - /var/run/docker.sock:/var/run/docker.sock:ro + environment: + - DOCKER_USR=root +``` ### Pass command line options to Netdata @@ -132,6 +155,8 @@ services: security_opt: - apparmor:unconfined volumes: + - /etc/passwd:/host/etc/passwd:ro + - /etc/group:/host/etc/group:ro - /proc:/host/proc:ro - /sys:/host/sys:ro - /var/run/docker.sock:/var/run/docker.sock:ro diff --git a/packaging/docker/build-test.sh b/packaging/docker/build-test.sh index a7e31d4f4..3c55e1736 100755 --- a/packaging/docker/build-test.sh +++ b/packaging/docker/build-test.sh @@ -46,27 +46,29 @@ do esac done -if [ -n "${REPOSITORY}" ] && [ -n "${VERSION}" ] && [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then +if [ -n "${REPOSITORY}" ]; then if [ $DOBUILD -eq 1 ] ; then - echo "Building ${VERSION} of ${REPOSITORY} container" + echo "Building ${VERSION:-latest} of ${REPOSITORY} container" docker run --rm --privileged multiarch/qemu-user-static:register --reset # Build images using multi-arch Dockerfile. - eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION}" --file packaging/docker/Dockerfile ./ + eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION:-latest}" --file packaging/docker/Dockerfile ./ # Create temporary docker CLI config with experimental features enabled (manifests v2 need it) mkdir -p /tmp/docker #echo '{"experimental":"enabled"}' > /tmp/docker/config.json fi - # Login to docker hub to allow futher operations - echo "Logging into docker" - echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin + if [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then + # Login to docker hub to allow futher operations + echo "Logging into docker" + echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin - echo "Pushing ${REPOSITORY}:${VERSION}" - docker --config /tmp/docker push "${REPOSITORY}:${VERSION}" + echo "Pushing ${REPOSITORY}:${VERSION}" + docker --config /tmp/docker push "${REPOSITORY}:${VERSION}" + fi else - echo "Missing parameter. REPOSITORY=${REPOSITORY} VERSION=${VERSION} DOCKER_USERNAME=${DOCKER_USERNAME} DOCKER_PWD=${DOCKER_PWD}" + echo "Missing parameter. REPOSITORY=${REPOSITORY}" printhelp exit 1 fi diff --git a/packaging/docker/publish.sh b/packaging/docker/publish.sh index fd1883afb..5a9e67ede 100755 --- a/packaging/docker/publish.sh +++ b/packaging/docker/publish.sh @@ -39,10 +39,6 @@ if [ ! 
-z ${DEVEL+x} ]; then declare -a ARCHS=(${DEVEL_ARCHS[@]}) fi -echo "Syncing repository with latest changes (We may have updated with package versions)" -git checkout master -git pull - # Ensure there is a version, the most appropriate one if [ "${VERSION}" == "" ]; then VERSION=$(git tag --points-at) diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh index 2b5047cd0..f4377d458 100755 --- a/packaging/docker/run.sh +++ b/packaging/docker/run.sh @@ -9,41 +9,15 @@ set -e echo "Netdata entrypoint script starting" if [ ${RESCRAMBLE+x} ]; then - echo "Reinstalling all packages to get the latest Polymorphic Linux scramble" - apk upgrade --update-cache --available + echo "Reinstalling all packages to get the latest Polymorphic Linux scramble" + apk upgrade --update-cache --available fi -create_group_and_assign_to_user() { - local local_DOCKER_GROUP="$1" - local local_DOCKER_GID="$2" - local local_DOCKER_USR="$3" - - echo >&2 "Adding group with ID ${local_DOCKER_GID} and name '${local_DOCKER_GROUP}'" - addgroup -g "${local_DOCKER_GID}" "${local_DOCKER_GROUP}" || echo >&2 "Could not add group ${local_DOCKER_GROUP} with ID ${local_DOCKER_GID}, its already there probably" - - echo >&2 "Adding user '${local_DOCKER_USR}' to group '${local_DOCKER_GROUP}/${local_DOCKER_GID}'" - sed -i "s/:${local_DOCKER_GID}:$/:${local_DOCKER_GID}:${local_DOCKER_USR}/g" /etc/group - - # Make sure we use the right docker group - GRP_TO_ASSIGN="$(grep ":x:${local_DOCKER_GID}:" /etc/group | cut -d':' -f1)" - if [ -z "${GRP_TO_ASSIGN}" ]; then - echo >&2 "Could not find group ID ${local_DOCKER_GID} in /etc/group. Check your logs and report it if this is an unrecovereable error" - else - echo >&2 "Group creation and assignment completed, netdata was assigned to group ${GRP_TO_ASSIGN}/${local_DOCKER_GID}" - echo "${GRP_TO_ASSIGN}" - fi -} - -DOCKER_USR="netdata" -DOCKER_SOCKET="/var/run/docker.sock" -DOCKER_GROUP="docker" - -if [ -S "${DOCKER_SOCKET}" ] && [ -n "${PGID}" ]; then - GRP=$(create_group_and_assign_to_user "${DOCKER_GROUP}" "${PGID}" "${DOCKER_USR}") - if [ -n "${GRP}" ]; then - echo "Adjusting ownership of mapped docker socket '${DOCKER_SOCKET}' to root:${GRP}" - chown "root:${GRP}" "${DOCKER_SOCKET}" || echo "Failed to change ownership on docker socket, container name resolution might not work" - fi +if [ -n "${PGID}" ]; then + echo "Creating docker group ${PGID}" + addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, its already there probably" + echo "Assign netdata user to docker group ${PGID}" + usermod -a -G ${PGID} ${DOCKER_USR} || echo >&2 "Could not add netdata user to group docker with ID ${PGID}" fi exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_PORT}" "$@" diff --git a/packaging/installer/README.md b/packaging/installer/README.md index b10ffa05a..67a7a9120 100644 --- a/packaging/installer/README.md +++ b/packaging/installer/README.md @@ -6,7 +6,6 @@ The best way to install Netdata is directly from source. Our **automatic install !!! warning You can find Netdata packages distributed by third parties. In many cases, these packages are either too old or broken. So, the suggested ways to install Netdata are the ones in this page. - **We are currently working to provide our binary packages for all Linux distros.** Stay tuned... 1. [Automatic one line installation](#one-line-installation), easy installation from source, **this is the default** 2. 
[Install pre-built static binary on any 64bit Linux](#linux-64bit-pre-built-static-binary) @@ -17,6 +16,7 @@ The best way to install Netdata is directly from source. Our **automatic install 7. [Enable on FreeNAS Corral](#freenas) 8. [Install on macOS (OS X)](#macos) 9. [Install on a Kubernetes cluster](https://github.com/netdata/helmchart#netdata-helm-chart-for-kubernetes-deployments) +10. [Install using binary packages](#binary-packages) See also the list of Netdata [package maintainers](../maintainers) for ASUSTOR NAS, OpenWRT, ReadyNAS, etc. @@ -24,26 +24,27 @@ Note: From Netdata v1.12 and above, anonymous usage information is collected by --- -## One line installation +## One-line installation +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) -> This method is **fully automatic on all Linux** distributions. FreeBSD and MacOS systems need some preparations before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information. +This method is **fully automatic on all Linux distributions**. FreeBSD and MacOS systems need some preparations before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information. -To install Netdata from source and keep it up to date automatically, run the following: +To install Netdata from source, and keep it up to date with our **nightly releases** automatically, run the following: -```bash -bash <(curl -Ss https://my-netdata.io/kickstart.sh) +``` bash +$ bash <(curl -Ss https://my-netdata.io/kickstart.sh) ``` -*(do not `sudo` this command, it will do it by itself as needed)* +!!! note + Do not use `sudo` for the one-line installer—it will escalate privileges itself if needed. -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) + To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](#nightly-vs-stable-releases). -
Click here for more information and advanced use of this command. +
Click here for more information and advanced use of the one-line installation script.
Verify the integrity of the script with this: -```bash +``` bash [ "8a2b054081a108dff915994ce77f2f2d" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` *It should print `OK, VALID` if the script is the one we ship.* @@ -56,48 +57,47 @@ The `kickstart.sh` script: - installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated daily (you will get a message from cron only if the update fails). - For QA purposes, this installation method lets us know if it succeed or failed. -The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to change the installation directory, enable/disable plugins, etc (check below). +The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters: -For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata. -You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds. +- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. +- `--dont-start-it`: Prevent the installer from starting Netdata automatically. +- `--stable-channel`: Automatically update only on the release of new major versions. +- `--no-updates`: Prevent automatic updates of any kind. -Example: +Example using all the above parameters: ```bash - bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it --stable-channel +$ bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it --no-updates --stable-channel ``` -If you don't want to receive automatic updates, add `--no-updates` when executing `kickstart.sh` script. - -
 
+
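+
+Once the installer finishes, a quick way to confirm the agent is up is to query its `info` API endpoint. This is a minimal sketch, assuming you kept the default port `19999` and did not pass `--dont-start-it`:
+
+```bash
+# A JSON response here means Netdata is running and serving its API.
+curl -Ss http://localhost:19999/api/v1/info
+```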
Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md). --- ## Linux 64bit pre-built static binary +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0) -You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system -(even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). -You can also use these packages on systems with broken or unsupported package managers. +You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system (even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). You can also use these packages on systems with broken or unsupported package managers. -To install Netdata with a binary package on any Linux distro, any kernel version - for **Intel/AMD 64bit** hosts, run the following: +To install Netdata from a binary package on any Linux distro and any kernel version on **Intel/AMD 64bit** systems, and keep it up to date with our **nightly releases** automatically, run the following: ```bash - - bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) - +$ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) ``` -*(do not `sudo` this command, it will do it by itself as needed; if the target system does not have `bash` installed, see below for instructions to run it without `bash`)* +!!! note + Do not use `sudo` for this installer—it will escalate privileges itself if needed. -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0) + To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](README.md#nightly-vs-stable-releases). -> The static builds install Netdata at **`/opt/netdata`** + If your system does not have `bash` installed, open the `More information and advanced uses of the kickstart-static64.sh script` dropdown for instructions to run the installer without `bash`. + + This script installs Netdata at `/opt/netdata`.
Click here for more information and advanced use of this command.
Verify the integrity of the script with this: ```bash @@ -106,15 +106,17 @@ Verify the integrity of the script with this: *It should print `OK, VALID` if the script is the one we ship.* -For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata. -You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds. +The `kickstart-static64.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters: -Example: +- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. +- `--dont-start-it`: Prevent the installer from starting Netdata automatically. +- `--stable-channel`: Automatically update only on the release of new major versions. +- `--no-updates`: Prevent automatic updates of any kind. -```bash - - bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it --stable-channel +Example using all the above parameters: +```bash +$ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it --no-updates --stable-channel ``` If your shell fails to handle the above one liner, do this: @@ -135,7 +137,7 @@ sh /tmp/kickstart-static64.sh - The same files can be used for updates too. - For QA purposes, this installation method lets us know if it succeed or failed. -
 
+ Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md).

@@ -205,16 +207,16 @@ This is how to do it by hand:

```sh
# Debian / Ubuntu
-apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl
+apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl python

# Fedora
-dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
+dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python

# CentOS / Red Hat Enterprise Linux
-yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel
+yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel make nc pkgconfig python zlib-devel

# openSUSE
-zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
+zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python
```

@@ -293,6 +295,26 @@ To apply the changes you made, you have to restart Netdata.

---

+### Binary Packages
+![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
+
+We provide our own binary packages, in both the .RPM and .DEB packaging formats, for the most common operating systems.
+
+We released .RPM packages with version [1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0).
+We plan to release .DEB packages with version [1.17.0](https://github.com/netdata/netdata/releases/tag/v1.17.0).
+Early adopters may experiment with our .DEB packages using our nightly releases. Our current packaging infrastructure provider is [Package Cloud](https://packagecloud.io).
+
+Netdata is committed to supporting installation on all operating systems, and we strive to automate and simplify this process for our users. For the operating system support matrix, please visit our [distributions](../../packaging/DISTRIBUTIONS.md) support page.
+
+We provide two separate repositories, one for our stable releases and one for our nightly releases:
+
+1. Stable releases: Our stable production releases are hosted in the [netdata/netdata](https://packagecloud.io/netdata/netdata) repository on Package Cloud.
+2. Nightly releases: Our latest releases are hosted in the [netdata/netdata-edge](https://packagecloud.io/netdata/netdata-edge) repository on Package Cloud.
+
+Visit the repository pages and follow the quick set-up instructions to get started.
+
+---
+

## Other Systems

@@ -448,4 +470,38 @@ Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as a
[ -x /etc/rc.netdata ] && /etc/rc.netdata start
```
+
+## Nightly vs.
stable releases + +The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration. + +**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws, or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when we're ready, we simply change the release tags on GitHub. That means nightly releases are stable and proven to function correctly in the vast majority of Netdata use cases. That's why nightly is the *best choice for most Netdata users*. + +**Stable**: We create stable releases whenever we believe the code has reached a major milestone. Most often, stable releases correlate with the introduction of new, significant features. Stable releases might be a better choice for those who run Netdata in *mission-critical production systems*, as updates will come more infrequently, and only after the community helps fix any bugs that might have been introduced in previous releases. + +**Pros of using nightly releases:** + + - Get the latest features and bugfixes as soon as they're available + - Receive security-related fixes immediately + - Use stable, fully-tested code that's always improving + - Leverage the same Netdata experience our community is using + +**Pros of using stable releases:** + + - Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation + - Retain more control over the Netdata version you use + + +## Automatic updates + +By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels. + +If you would prefer to manually update your Netdata agent, you can disable automatic updates by using the `--no-updates` option when you install or update Netdata using the [one-line installation script](#one-line-installation). + +```bash +bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates +``` + +With automatic updates disabled, you can choose exactly when and how you [update Netdata](UPDATE.md). + [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md index 765184d39..43d03b002 100644 --- a/packaging/installer/UNINSTALL.md +++ b/packaging/installer/UNINSTALL.md @@ -16,7 +16,7 @@ NETDATA_ADDED_TO_GROUPS="" # Additional groups for a user ru ``` 3. Run `netdata-uninstaller.sh` as follows ``` -${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh --yes --env +${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env ``` Note: Existing installations may still need to download the file if it's not present. 
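+
+If an older installation is missing the uninstaller, one way to fetch it is directly from the source tree and then run it against your `.environment` file. This is an illustrative sketch: the raw GitHub URL assumes the `master` branch, and the final argument is a placeholder for wherever your `.environment` file lives.
+
+```bash
+# Hypothetical fetch-and-run of the uninstaller; adjust the branch and paths as needed.
+curl -Ss -o /tmp/netdata-uninstaller.sh \
+    https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh
+chmod +x /tmp/netdata-uninstaller.sh
+/tmp/netdata-uninstaller.sh --yes --env <path-to-.environment>
+```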
diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh index 0bbdaac2c..41ada6234 100755 --- a/packaging/installer/netdata-uninstaller.sh +++ b/packaging/installer/netdata-uninstaller.sh @@ -2,14 +2,15 @@ #shellcheck disable=SC2181 # # This is the netdata uninstaller script +# # Variables needed by script and taken from '.environment' file: # - NETDATA_PREFIX # - NETDATA_ADDED_TO_GROUPS # # Copyright: SPDX-License-Identifier: GPL-3.0-or-later # -# Author: Paul Emm. Katsoulakis -# +# Author: Paweł Krupa +# Author: Pavlos Emm. Katsoulakis usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh index 83031f3aa..6609edd5a 100755 --- a/packaging/installer/netdata-updater.sh +++ b/packaging/installer/netdata-updater.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash #shellcheck disable=SC2164 - -# this script will uninstall netdata - +# +# Netdata updater utility +# # Variables needed by script: # - PATH # - CFLAGS @@ -11,6 +11,12 @@ # - NETDATA_TARBALL_URL # - NETDATA_TARBALL_CHECKSUM_URL # - NETDATA_TARBALL_CHECKSUM +# - NETDATA_PREFIX / NETDATA_LIB_DIR (After 1.16.1 we will only depend on lib dir) +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author: Paweł Krupa +# Author: Pavlos Emm. Katsoulakis info() { echo >&3 "$(date) : INFO: " "${@}" @@ -67,21 +73,26 @@ download() { } set_tarball_urls() { + local extension="tar.gz" if [ ! -z "${NETDATA_LOCAL_TARBAL_OVERRIDE}" ]; then info "Not fetching remote tarballs, local override was given" return fi + if [ "$2" == "yes" ]; then + extension="gz.run" + fi + if [ "$1" = "stable" ]; then local latest # Simple version # latest="$(curl -sSL https://api.github.com/repos/netdata/netdata/releases/latest | grep tag_name | cut -d'"' -f4)" latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)" - export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.tar.gz" + export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.${extension}" export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt" else - export NETDATA_TARBALL_URL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.tar.gz" + export NETDATA_TARBALL_URL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.${extension}" export NETDATA_TARBALL_CHECKSUM_URL="https://storage.googleapis.com/netdata-nightlies/sha256sums.txt" fi } @@ -131,10 +142,12 @@ update() { info "Re-installing netdata..." eval "${REINSTALL_COMMAND} --dont-wait ${do_not_start}" >&3 2>&3 || fatal "FAILED TO COMPILE/INSTALL NETDATA" + + # We no longer store checksum info here. but leave this so that we clean up all environment files upon next update. 
sed -i '/NETDATA_TARBALL/d' "${ENVIRONMENT_FILE}" - cat <>"${ENVIRONMENT_FILE}" -NETDATA_TARBALL_CHECKSUM="$NEW_CHECKSUM" -EOF + + info "Updating tarball checksum info" + echo "${NEW_CHECKSUM}" > "${NETDATA_LIB_DIR}/netdata.tarball.checksum" fi rm -rf "${tmpdir}" >&3 2>&3 @@ -148,6 +161,12 @@ EOF # shellcheck source=/dev/null source "${ENVIRONMENT_FILE}" || exit 1 +# We dont expect to find lib dir variable on older installations, so load this path if none found +export NETDATA_LIB_DIR="${NETDATA_LIB_DIR:-${NETDATA_PREFIX}/var/lib/netdata}" + +# Source the tarbal checksum, if not already available from environment (for existing installations with the old logic) +[[ -z "${NETDATA_TARBALL_CHECKSUM}" ]] && [[ -f ${NETDATA_LIB_DIR}/netdata.tarball.checksum ]] && NETDATA_TARBALL_CHECKSUM="$(cat "${NETDATA_LIB_DIR}/netdata.tarball.checksum")" + if [ "${INSTALL_UID}" != "$(id -u)" ]; then fatal "You are running this script as user with uid $(id -u). We recommend to run this script as root (user with uid 0)" fi @@ -165,7 +184,34 @@ else exec 3>"${logfile}" fi -set_tarball_urls "${RELEASE_CHANNEL}" +set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}" + +if [ "${IS_NETDATA_STATIC_BINARY}" == "yes" ]; then + TMPDIR="$(create_tmp_directory)" + PREVDIR="$(pwd)" + + echo >&2 "Entering ${TMPDIR}" + cd "${TMPDIR}" + + download "${NETDATA_TARBALL_CHECKSUM_URL}" "${TMPDIR}/sha256sum.txt" + download "${NETDATA_TARBALL_URL}" "${TMPDIR}/netdata-latest.gz.run" + if ! grep netdata-latest.gz.run "${TMPDIR}/sha256sum.txt" | safe_sha256sum -c - >/dev/null 2>&1; then + fatal "Static binary checksum validation failed. Stopping netdata installation and leaving binary in ${TMPDIR}" + fi + + # Do not pass any options other than the accept, for now + sh "${TMPDIR}/netdata-latest.gz.run" --accept + + #shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + rm -r "${TMPDIR}" + else + echo >&2 "NOTE: did not remove: ${TMPDIR}" + fi + echo >&2 "Switching back to ${PREVDIR}" + cd "${PREVDIR}" +else + # the installer updates this script - so we run and exit in a single line + update && exit 0 +fi -# the installer updates this script - so we run and exit in a single line -update && exit 0 diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh index 71ea0f63a..80fba3158 100755 --- a/packaging/makeself/jobs/70-netdata-git.install.sh +++ b/packaging/makeself/jobs/70-netdata-git.install.sh @@ -13,6 +13,10 @@ else # export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness" fi +# We export this to 'yes', installer sets this to .environment. 
+# The updater consumes this one, so that it can tell whether it should update a static install or a non-static one +export IS_NETDATA_STATIC_BINARY="yes" + run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \ --dont-wait \ --dont-start-it \ diff --git a/packaging/version b/packaging/version index a406138ee..e17963c5d 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.16.0 +v1.16.1 diff --git a/streaming/README.md b/streaming/README.md index 3e58f1f06..1bfbb2362 100644 --- a/streaming/README.md +++ b/streaming/README.md @@ -138,7 +138,7 @@ headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not proxy with db|not `none`|not `none`|`yes`|possible|possible|yes central netdata|not `none`|not `none`|`no`|possible|possible|yes -For the options to encrypt the data stream between the slave and the master, refer to [securing the communication](#securing-the-communication) +For the options to encrypt the data stream between the slave and the master, refer to [securing the communication](#securing-streaming-communications) ##### options for the receiving node @@ -213,40 +213,91 @@ The receiving end (`proxy` or `master`) logs entries like these: For netdata v1.9+, streaming can also be monitored via `access.log`. -### Securing the communication +### Securing streaming communications -Netdata does not activate TLS encryption by default. To encrypt the connection, you first need to [enable TLS support](../web/server/#enabling-tls-support) on the master. With encryption enabled on the receiving side, we need to instruct the slave to use SSL as well. On the slave's `stream.conf`, configure the destination as follows: +Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS support](../web/server/#enabling-tls-support) on the master. With encryption enabled on the receiving side, you need to instruct the slave to use TLS/SSL as well. On the slave's `stream.conf`, configure the destination as follows: ``` [stream] destination = host:port:SSL ``` -The word SSL appended to the end of the destination tells the slave that the connection must be encrypted. +The word `SSL` appended to the end of the destination tells the slave that connections must be encrypted. + +??? info "Differences in TLS and SSL terminology" + While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`. #### Certificate verification -When SSL is enabled on the slave, the default behavior will be do not connect with the master unless the server's certificate can be verified via the default chain. In case you want to avoid this check, add to the slave's `stream.conf` the following: +When TLS/SSL is enabled on the slave, the default behavior will be to not connect with the master unless the server's certificate can be verified via the default chain. 
In case you want to avoid this check, add the following to the slave's `stream.conf` file:

```
[stream]
    ssl skip certificate verification = yes
```

+#### Trusted certificate
+
+If you've enabled [certificate verification](#certificate-verification), you might see errors from the OpenSSL library when there's a problem with checking the certificate chain (`X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY`). More importantly, OpenSSL will reject self-signed certificates.
+
+Given these known issues, you have two options: skip certificate verification as described above, or, if you trust the certificate, set the `CApath` and `CAfile` options to tell Netdata where your trusted certificates and the trusted certificate file are stored.
+
+For more details about these options, you can read about [verify locations](https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html).
+
+Before you change your streaming configuration, you need to copy your trusted certificate to your slave system and add the certificate to OpenSSL's list.
+
+On most Linux distributions, the `update-ca-certificates` command searches inside the `/usr/share/ca-certificates` directory for certificates. You should double-check by reading the `update-ca-certificates` manual (`man update-ca-certificates`), and then change the directory in the below commands if needed.
+
+If you have `sudo` configured on your slave system, you can use that to run the following commands. If not, you'll have to log in as `root` to complete them.
+
+```
+# mkdir /usr/share/ca-certificates/netdata
+# cp master_cert.pem /usr/share/ca-certificates/netdata/master_cert.crt
+# chown -R netdata.netdata /usr/share/ca-certificates/netdata/
+```
+
+First, you create a new directory to store your certificates for Netdata. Next, you change the extension on your certificate from `.pem` to `.crt` so it's compatible with `update-ca-certificates`. Finally, you change permissions so the user that runs Netdata can access the directory where you copied in your certificate.
+
+Next, edit the file `/etc/ca-certificates.conf` and add the following line:
+
+```
+netdata/master_cert.crt
+```
+
+Now, update the list of certificates by running the following, again either with `sudo` or as `root`:
+
+```
+# update-ca-certificates
+```
+
+!!! note
+    Some Linux distributions have different methods of updating the certificate list. For more details, please read this guide on [adding trusted root certificates](https://github.com/Busindre/How-to-Add-trusted-root-certificates).
+
+Once you update your certificate list, you can set the stream parameters for Netdata to trust the master certificate. Open `stream.conf` for editing and change the following lines:
+
+```
+[stream]
+    CApath = /etc/ssl/certs/
+    CAfile = /etc/ssl/certs/master_cert.pem
+```
+
+With this configuration, the `CApath` option tells Netdata to search for trusted certificates inside `/etc/ssl/certs`, and the `CAfile` option specifies that the Netdata master certificate is located at `/etc/ssl/certs/master_cert.pem`. This way, Netdata validates against the master's certificate directly instead of relying on the system's entire list of certificates.
+
#### Expected behaviors

-With the introduction of SSL, the master-slave communication behaves as shown in the table below, depending on the following configurations:
-- Master TLS (Yes/No): Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
-- Master port SSL (-/force/optional): Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming. -- Slave TLS (Yes/No): Whether the destination in the slave's `stream.conf` has `:SSL` at the end. -- Slave SSL Verification (yes/no): Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no). +With the introduction of TLS/SSL, the master-slave communication behaves as shown in the table below, depending on the following configurations: + +- **Master TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`. +- **Master port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming. +- **Slave TLS (Yes/No)**: Whether the destination in the slave's `stream.conf` has `:SSL` at the end. +- **Slave TLS Verification (yes/no)**: Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no). Master TLS enabled | Master port SSL | Slave TLS | Slave SSL Ver. | Behavior :------:|:-----:|:-----:|:-----:|:-------- No | - | No | no | Legacy behavior. The master-slave stream is unencrypted. Yes | force | No | no | The master rejects the slave connection. Yes | -/optional | No | no | The master-slave stream is unencrypted (expected situation for legacy slaves and newer masters) -Yes | -/force/optional | Yes | no | The master-slave stream is encrypted, provided that the master has a valid SSL certificate. Otherwise, the slave refuses to connect. +Yes | -/force/optional | Yes | no | The master-slave stream is encrypted, provided that the master has a valid TLS/SSL certificate. Otherwise, the slave refuses to connect. Yes | -/force/optional | Yes | yes | The master-slave stream is encrypted. 
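+
+Putting the slave-side options together: a minimal `stream.conf` for a slave that connects to a master over TLS and trusts the master's self-signed certificate could look like the sketch below. The hostname, port, API key and certificate paths are illustrative; substitute your own.
+
+```
+[stream]
+    enabled = yes
+    destination = masterhost:19999:SSL
+    api key = 11111111-2222-3333-4444-555555555555
+    # Trust the master's certificate (see "Trusted certificate" above)
+    # instead of skipping verification:
+    CApath = /etc/ssl/certs/
+    CAfile = /etc/ssl/certs/master_cert.pem
+```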
## Viewing remote host dashboards, using mirrored databases diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c index 954b1d7d1..59913c24b 100644 --- a/streaming/rrdpush.c +++ b/streaming/rrdpush.c @@ -48,6 +48,11 @@ unsigned int default_rrdpush_enabled = 0; char *default_rrdpush_destination = NULL; char *default_rrdpush_api_key = NULL; char *default_rrdpush_send_charts_matching = NULL; +#ifdef ENABLE_HTTPS +int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL; +char *netdata_ssl_ca_path = NULL; +char *netdata_ssl_ca_file = NULL; +#endif static void load_stream_conf() { errno = 0; @@ -89,13 +94,17 @@ int rrdpush_init() { } } } + char *invalid_certificate = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", "no"); if ( !strcmp(invalid_certificate,"yes")){ if (netdata_validate_server == NETDATA_SSL_VALID_CERTIFICATE){ - info("The Netdata is configured to accept invalid certificate."); + info("Netdata is configured to accept invalid SSL certificate."); netdata_validate_server = NETDATA_SSL_INVALID_CERTIFICATE; } } + + netdata_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", "/etc/ssl/certs/"); + netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", "/etc/ssl/certs/certs.pem"); #endif return default_rrdpush_enabled; @@ -652,6 +661,7 @@ void *rrdpush_sender_thread(void *ptr) { #ifdef ENABLE_HTTPS if (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE ){ security_start_ssl(NETDATA_SSL_CONTEXT_STREAMING); + security_location_for_context(netdata_client_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path); } #endif @@ -801,7 +811,17 @@ void *rrdpush_sender_thread(void *ptr) { rrdpush_buffer_lock(host); debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_sender_buffer)); - ssize_t ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); + ssize_t ret; +#ifdef ENABLE_HTTPS + SSL *conn = host->ssl.conn ; + if(conn && !host->ssl.flags) { + ret = SSL_write(conn,&host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin); + } else { + ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); + } +#else + ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); +#endif if (unlikely(ret == -1)) { if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) { debug(D_STREAM, "STREAM: Send failed - closing socket..."); @@ -1059,6 +1079,8 @@ static int rrdpush_receive(int fd info("STREAM %s [receive from [%s]:%s]: initializing communication...", host->hostname, client_ip, client_port); #ifdef ENABLE_HTTPS + host->ssl.conn = ssl->conn; + host->ssl.flags = ssl->flags; if(send_timeout(ssl,fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { #else if(send_timeout(fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { diff --git a/streaming/stream.conf b/streaming/stream.conf index 0d360cc24..fdff1f25f 100644 --- a/streaming/stream.conf +++ b/streaming/stream.conf @@ -41,6 +41,22 @@ # #ssl skip certificate verification = yes + # Certificate Authority Path + # + # OpenSSL has a default directory where the known certificates are stored, + # case it is 
necessary, it is possible to change this directory using the
+	# "CApath" option below.
+	#
+	#CApath = /etc/ssl/certs/
+
+	# Certificate Authority file
+	#
+	# When the Netdata master has a certificate that is not recognized as valid,
+	# we can add this certificate to the list of known certificates in CApath
+	# and pass it to Netdata with the "CAfile" option.
+	#
+	#CAfile = /etc/ssl/certs/cert.pem
+
	# The API_KEY to use (as the sender)
	api key =
diff --git a/tests/Makefile.am b/tests/Makefile.am
index b0f65456e..92e6db0f3 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -5,6 +5,8 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

CLEANFILES = \
	health_mgmtapi/health-cmdapi-test.sh \
+	acls/acl.sh \
+	urls/request.sh \
	$(NULL)

include $(top_srcdir)/build/subst.inc

@@ -22,10 +24,14 @@ dist_noinst_DATA = \
	node.d/fronius.process.spec.js \
	node.d/fronius.validation.spec.js \
	health_mgmtapi/health-cmdapi-test.sh.in \
+	acls/acl.sh.in \
+	urls/request.sh.in \
	$(NULL)

dist_plugins_SCRIPTS = \
	health_mgmtapi/health-cmdapi-test.sh \
+	acls/acl.sh \
+	urls/request.sh \
	$(NULL)

dist_noinst_SCRIPTS = \
diff --git a/tests/acls/acl.sh.in b/tests/acls/acl.sh.in
new file mode 100644
index 000000000..772d66408
--- /dev/null
+++ b/tests/acls/acl.sh.in
@@ -0,0 +1,119 @@
+#!/bin/bash -x
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+BASICURL="http://127.0.0.1"
+BASICURLS="https://127.0.0.1"
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;43m'
+
+# Change the ACL line of the base configuration to a new value
+# and store the result in a new file
+change_file(){
+    sed "s/$1/$2/g" netdata.cfg > "$4"
+}
+
+change_ssl_file(){
+    KEYROW="ssl key = $3/key.pem"
+    CERTROW="ssl certificate = $3/cert.pem"
+    sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp
+    sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2
+    sed "s/$1/$2/g" tmp2 > "$4"
+}
+
+run_acl_tests() {
+    # Give netdata time to start properly
+    sleep 2
+
+    curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt
+    curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt
+    curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt
+    curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt
+    curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt
+
+    TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1)
+    if [ "$TOT" -ne "$4" ]; then
+        echo -e "${RED}I got a wrong number of redirects($TOT) when SSL is activated, it was expected $4"
+        rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+        killall netdata
+        exit 1
+    elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then
+        echo -e "${YELLOW}I got the correct number of redirects($4) when SSL is activated and I try to access with HTTP."
+        return
+    fi
+
+    TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1)
+    if [ "$TOT" -ne "$3" ]; then
+        echo -e "${RED}I got a wrong number of \"200 OK\" from the queries, it was expected $3."
+ killall netdata + rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt + exit 1 + fi + + echo -e "${GREEN}ACLs were applied correctly" +} + +CONF=$(grep "bind" netdata.cfg) +MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//') + +openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem +chown "$MUSER" key.pem cert.pem +CWD=$(pwd) + +if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then + read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key" +else + TOKEN="NULL" +fi + +change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0" +netdata -c "netdata.conf.test0" +run_acl_tests $TOKEN "$BASICURL:19999" 5 0 +killall netdata + +change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1" +netdata -c "netdata.conf.test1" +run_acl_tests $TOKEN "$BASICURL:19999" 5 5 +run_acl_tests $TOKEN "$BASICURLS:19999" 5 0 + +run_acl_tests $TOKEN "$BASICURL:20000" 4 5 +run_acl_tests $TOKEN "$BASICURLS:20000" 4 0 + +run_acl_tests $TOKEN "$BASICURL:20001" 4 0 +run_acl_tests $TOKEN "$BASICURLS:20001" 4 0 + +run_acl_tests $TOKEN "$BASICURL:20002" 3 5 +run_acl_tests $TOKEN "$BASICURLS:20002" 3 0 +killall netdata + +change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2" +netdata -c "netdata.conf.test2" +run_acl_tests $TOKEN "$BASICURL:19999" 5 5 +run_acl_tests $TOKEN "$BASICURLS:19999" 5 0 + +run_acl_tests $TOKEN "$BASICURL:20000" 4 5 +run_acl_tests $TOKEN "$BASICURLS:20000" 4 0 + +run_acl_tests $TOKEN "$BASICURL:20001" 4 5 +run_acl_tests $TOKEN "$BASICURLS:20001" 4 0 + +run_acl_tests $TOKEN "$BASICURL:20002" 3 5 +run_acl_tests $TOKEN "$BASICURLS:20002" 3 0 +killall netdata + +change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3" +netdata -c "netdata.conf.test3" +run_acl_tests $TOKEN "$BASICURL:19999" 5 5 +run_acl_tests $TOKEN "$BASICURLS:19999" 5 0 + +run_acl_tests $TOKEN "$BASICURL:20000" 4 0 +run_acl_tests $TOKEN "$BASICURLS:20000" 4 0 + +run_acl_tests $TOKEN "$BASICURL:20001" 4 5 +run_acl_tests $TOKEN "$BASICURLS:20001" 4 0 +killall netdata + +rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt +echo "All the tests were successful" diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg new file mode 100644 index 000000000..1dcb4a5c6 --- /dev/null +++ b/tests/acls/netdata.cfg @@ -0,0 +1,20 @@ +# netdata configuration +# +# You can download the latest version of this file, using: +# +# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# or +# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# +# You can uncomment and change any of the options below. +# The value shown in the commented settings, is the default value. 
+# + +[global] + run as user = netdata + + # the default database size - 1 hour + history = 3600 + + # by default do not expose the netdata port + bind to = localhost diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg new file mode 100644 index 000000000..28e0030d5 --- /dev/null +++ b/tests/acls/netdata.ssl.cfg @@ -0,0 +1,24 @@ +# netdata configuration +# +# You can download the latest version of this file, using: +# +# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# or +# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# +# You can uncomment and change any of the options below. +# The value shown in the commented settings, is the default value. +# + +[global] + run as user = netdata + + # the default database size - 1 hour + history = 3600 + + # by default do not expose the netdata port + bind to = localhost + +[web] + ssl key = + ssl certificate = diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in new file mode 100644 index 000000000..fac00bc4e --- /dev/null +++ b/tests/urls/request.sh.in @@ -0,0 +1,303 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-3.0-or-later + +################################################################################################ +#### #### +#### GLOBAL VARIABLES #### +#### #### +################################################################################################ + +# The current time +CT=$(date +'%s') + +# The previous time +PT=$((CT - 30)) + +# The output directory where we will store the results and error +OUTDIR="tests" +OUTEDIR="encoded_tests" +OUTOPTDIR="options" +ERRDIR="etests" + +################################################################################################ +#### #### +#### FUNCTIONS #### +#### #### +################################################################################################ + +# Print error message and close script +netdata_print_error(){ + echo "Closing due error \"$1\" code \"$2\"" + exit 1 +} + +# Print the header message of the function +netdata_print_header() { + echo "$1" +} + +# Create the main directory where the results will be stored +netdata_create_directory() { + netdata_print_header "Creating directory $1" + if [ ! -d "$1" ]; then + mkdir "$1" + TEST=$? + if [ $TEST -ne 0 ]; then + netdata_print_error "Cannot create directory $?" + fi + else + echo "Working with directory $OUTDIR" + fi +} + +#Check whether download did not have problem +netdata_test_download(){ + grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null + TEST=$? + if [ $TEST -ne 0 ]; then + netdata_print_error "Cannot do download of the page $2" $? + exit 1 + fi +} + +#Check whether download had a problem +netdata_error_test(){ + grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null + TEST=$? + if [ $TEST -eq 0 ]; then + netdata_print_error "The page $2 did not answer with an error" $? 
+ exit 1 + fi +} + + +# Download information from Netdata +netdata_download_various() { + netdata_print_header "Getting $2" + curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err" + netdata_test_download "$OUTDIR/$3.err" "$1/$2" +} + +netdata_download_various_with_options() { + netdata_print_header "Getting options for $2" + curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err" + netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2" +} + +# Download information from Netdata +netdata_wrong_request_various() { + netdata_print_header "Getting $2" + curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err" + netdata_error_test "$ERRDIR/$3.err" "$1/$2" +} + +# Download charts from Netdata +netdata_download_charts() { + curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err" + netdata_test_download "$OUTDIR/charts.err" "$1/$2" + + #Rewrite the next + grep -w "id" tests/charts.out| cut -d: -f2 | grep "\"," | sed s/,//g | sort +} + +#Test options for a specific chart +netdata_download_chart() { + SEPARATOR="&" + EQUAL="=" + OUTD=$OUTDIR + ENCODED=" " + for I in $(seq 0 1); do + if [ "$I" -eq "1" ] ; then + SEPARATOR="%26" + EQUAL="%3D" + OUTD=$OUTEDIR + ENCODED="encoded" + fi + + NAME=${3//\"/} + netdata_print_header "Getting data for $NAME using $4 $ENCODED" + + LDIR=$OUTD"/"$4 + + LURL="$1/$2$EQUAL$NAME" + + NAME=$NAME"_$4" + + curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err" + netdata_test_download "$LDIR/$NAME.err" "$LURL" + + UFILES=( "points" "before" "after" ) + COUNTER=0 + for OPT in "points=100" "before=$PT" "after=$CT" ; + do + LURL="$LURL$SEPARATOR$OPT" + LFILE=$NAME"_${UFILES[$COUNTER]}"; + + curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err" + netdata_test_download "$LDIR/$LFILE.err" "$LURL" + + COUNTER=$((COUNTER + 1)) + done + + LURL="$LURL&group$EQUAL" + for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average"; + do + TURL=$LURL$OPT + TFILE=$NAME"_$OPT"; + curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err" + netdata_test_download "$LDIR/$TFILE.err" "$TURL" + for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array"; + do + TURL=$TURL"&format="$MORE + TFILE=$NAME"_$OPT""_$MORE"; + curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err" + netdata_test_download "$LDIR/$TFILE.err" "$TURL" + done + done + + LURL="$LURL$OPT>ime=60" + NFILE=$NAME"_gtime" + curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err" + netdata_test_download "$LDIR/$NFILE.err" "$LURL" + + LURL="$LURL$OPT&options=percentage" + NFILE=$NAME"_percentage" + curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err" + netdata_test_download "$LDIR/$NFILE.err" "$LURL" + + LURL="$LURL$OPT&dimensions=system%7Cnice" + NFILE=$NAME"_dimension" + curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err" + netdata_test_download "$LDIR/$NFILE.err" "$LURL" + + LURL="$LURL$OPT&label=testing" + NFILE=$NAME"_label" + curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err" + netdata_test_download "$LDIR/$NFILE.err" "$LURL" + done +} + +# Download information from Netdata +netdata_download_allmetrics() { + netdata_print_header "Getting All metrics" + LURL="$1/api/v1/allmetrics?format=" + for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ; + do + TURL=$LURL$FMT + for OPT in "yes" "no"; + 
do + if [ "$FMT" == "prometheus" ]; then + TURL="$TURL&help=$OPT&types=$OPT×tamps=$OPT" + fi + TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND" + + NAME="allmetrics_$FMT" + echo "$OUTDIR/$2/$NAME.out" + curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err" + netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL" + done + done +} + + +################################################################################################ +#### #### +#### MAIN ROUTINE #### +#### #### +################################################################################################ +MURL="http://127.0.0.1:19999" + +netdata_create_directory $OUTDIR +netdata_create_directory $OUTEDIR +netdata_create_directory $OUTOPTDIR +netdata_create_directory $ERRDIR + +wget --execute="robots = off" --mirror --convert-links --no-parent http://127.0.0.1:19999 +TEST=$? +if [ $TEST -ne "0" ] ; then + echo "Cannot connect to Netdata" + exit 1 +fi + +netdata_download_various $MURL "netdata.conf" "netdata.conf" + +netdata_download_various_with_options $MURL "netdata.conf" "netdata.conf" + +netdata_wrong_request_various $MURL "api/v15/info?this%20could%20not%20be%20here" "err_version" + +netdata_wrong_request_various $MURL "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2" + +netdata_download_various $MURL "api/v1/info" "info" +netdata_download_various_with_options $MURL "api/v1/info" "info" +netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info" + +netdata_print_header "Getting all the netdata charts" +CHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts" ) +WCHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts?this%20could%20not%20be%20here" ) +WCHARTS2=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts%3fthis%20could%20not%20be%20here" ) + +if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then + echo "The number of charts does not match with division not encoded."; + exit 2; +elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then + echo "The number of charts does not match when everything is encoded"; + exit 3; +fi + +netdata_wrong_request_various $MURL "api/v1/chart" "err_chart_without_chart" +netdata_wrong_request_various $MURL "api/v1/chart?_=234231424242" "err_chart_arg" + +netdata_download_various $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args" +netdata_download_various_with_options $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args" + +netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded" +netdata_download_various_with_options $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded" +netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2" +netdata_download_various $MURL "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3" + +netdata_create_directory "$OUTDIR/chart" +for I in $CHARTS ; do + NAME=${I//\"/} + netdata_download_various $MURL "api/v1/chart?chart=$NAME" "chart/$NAME" +done + +netdata_wrong_request_various $MURL "api/v1/alarm_variables" 
"err_alarm_variables_without_chart" +netdata_wrong_request_various $MURL "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg" +netdata_download_various $MURL "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args" + +netdata_create_directory "$OUTDIR/alarm_variables" +for I in $CHARTS ; do + NAME=${I//\"/} + netdata_download_various $MURL "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME" +done + +netdata_create_directory "$OUTDIR/badge" +netdata_create_directory "$OUTEDIR/badge" +for I in $CHARTS ; do + netdata_download_chart $MURL "api/v1/badge.svg?chart" "$I" "badge" +done + +netdata_create_directory "$OUTDIR/allmetrics" +netdata_download_allmetrics $MURL "allmetrics" + +netdata_download_various $MURL "api/v1/alarms?all" "alarms_all" +netdata_download_various $MURL "api/v1/alarms?active" "alarms_active" +netdata_download_various $MURL "api/v1/alarms" "alarms_nothing" + +netdata_download_various $MURL "api/v1/alarm_log?after" "alarm_without" +netdata_download_various $MURL "api/v1/alarm_log" "alarm_nothing" +netdata_download_various $MURL "api/v1/alarm_log?after&_=$PT" "alarm_log" + +netdata_create_directory "$OUTDIR/data" +netdata_create_directory "$OUTEDIR/data" +for I in $CHARTS ; do + netdata_download_chart $MURL "api/v1/data?chart" "$I" "data" + break; +done + +#http://arch-esxi:19999/api/v1/(*@&$!$%%5E)!$*%&)!$*%%5E*!%5E%!%5E$%!%5E%(!*%5E*%5E%(*@&$%5E%(!%5E#*&!^#$*&!^%)@($%^)!*&^(!*&^#$&#$)!$%^)!$*%&)#$!^#*$^!(*#^#)!%^!)$*%&!(*&$!^#$*&^!*#^$!*^)%(!*&$%)(!&#$!^*#&$^!*^%)!$%)!(&#$!^#*&^$ + +WHITE='\033[0;37m' +echo -e "${WHITE}ALL the URLS got 200 as answer!" + +exit 0 diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c index b24fddedf..4f9826fb2 100644 --- a/web/api/badges/web_buffer_svg.c +++ b/web/api/badges/web_buffer_svg.c @@ -11,7 +11,7 @@ * https://github.com/badges/shields/blob/master/measure-text.js */ -static double verdana11_widths[256] = { +static double verdana11_widths[128] = { [0] = 0.0, [1] = 0.0, [2] = 0.0, @@ -139,157 +139,36 @@ static double verdana11_widths[256] = { [124] = 4.9951171875, // | [125] = 6.982421875, // } [126] = 9.001953125, // ~ - [127] = 0.0, - [128] = 0.0, - [129] = 0.0, - [130] = 0.0, - [131] = 0.0, - [132] = 0.0, - [133] = 0.0, - [134] = 0.0, - [135] = 0.0, - [136] = 0.0, - [137] = 0.0, - [138] = 0.0, - [139] = 0.0, - [140] = 0.0, - [141] = 0.0, - [142] = 0.0, - [143] = 0.0, - [144] = 0.0, - [145] = 0.0, - [146] = 0.0, - [147] = 0.0, - [148] = 0.0, - [149] = 0.0, - [150] = 0.0, - [151] = 0.0, - [152] = 0.0, - [153] = 0.0, - [154] = 0.0, - [155] = 0.0, - [156] = 0.0, - [157] = 0.0, - [158] = 0.0, - [159] = 0.0, - [160] = 0.0, - [161] = 0.0, - [162] = 0.0, - [163] = 0.0, - [164] = 0.0, - [165] = 0.0, - [166] = 0.0, - [167] = 0.0, - [168] = 0.0, - [169] = 0.0, - [170] = 0.0, - [171] = 0.0, - [172] = 0.0, - [173] = 0.0, - [174] = 0.0, - [175] = 0.0, - [176] = 0.0, - [177] = 0.0, - [178] = 0.0, - [179] = 0.0, - [180] = 0.0, - [181] = 0.0, - [182] = 0.0, - [183] = 0.0, - [184] = 0.0, - [185] = 0.0, - [186] = 0.0, - [187] = 0.0, - [188] = 0.0, - [189] = 0.0, - [190] = 0.0, - [191] = 0.0, - [192] = 0.0, - [193] = 0.0, - [194] = 0.0, - [195] = 0.0, - [196] = 0.0, - [197] = 0.0, - [198] = 0.0, - [199] = 0.0, - [200] = 0.0, - [201] = 0.0, - [202] = 0.0, - [203] = 0.0, - [204] = 0.0, - [205] = 0.0, - [206] = 0.0, - [207] = 0.0, - [208] = 0.0, - [209] = 0.0, - [210] = 0.0, - [211] = 0.0, - [212] = 0.0, - [213] = 0.0, - [214] = 0.0, - [215] = 0.0, - [216] = 0.0, - [217] = 
0.0, - [218] = 0.0, - [219] = 0.0, - [220] = 0.0, - [221] = 0.0, - [222] = 0.0, - [223] = 0.0, - [224] = 0.0, - [225] = 0.0, - [226] = 0.0, - [227] = 0.0, - [228] = 0.0, - [229] = 0.0, - [230] = 0.0, - [231] = 0.0, - [232] = 0.0, - [233] = 0.0, - [234] = 0.0, - [235] = 0.0, - [236] = 0.0, - [237] = 0.0, - [238] = 0.0, - [239] = 0.0, - [240] = 0.0, - [241] = 0.0, - [242] = 0.0, - [243] = 0.0, - [244] = 0.0, - [245] = 0.0, - [246] = 0.0, - [247] = 0.0, - [248] = 0.0, - [249] = 0.0, - [250] = 0.0, - [251] = 0.0, - [252] = 0.0, - [253] = 0.0, - [254] = 0.0, - [255] = 0.0 + [127] = 0.0 }; // find the width of the string using the verdana 11points font -// re-write the string in place, skiping zero-length characters -static inline double verdana11_width(char *s) { +static inline double verdana11_width(const char *s, float em_size) { double w = 0.0; - char *d = s; while(*s) { - double t = verdana11_widths[(unsigned char)*s]; - if(t == 0.0) + // if UTF8 multibyte char found and guess it's width equal 1em + // as label width will be updated with JavaScript this is not so important + + // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata + // then use url_utf8_get_byte_length etc. + if(IS_UTF8_STARTBYTE(*s)) { s++; + while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){ + s++; + } + w += em_size; + } else { - w += t + VERDANA_KERNING; - if(d != s) - *d++ = *s++; - else - d = ++s; + if(likely(!(*s & 0x80))){ // Byte 1XXX XXXX is not valid in UTF8 + double t = verdana11_widths[(unsigned char)*s]; + if(t != 0.0) + w += t + VERDANA_KERNING; + } + s++; } } - *d = '\0'; w -= VERDANA_KERNING; w += VERDANA_PADDING; return w; @@ -810,8 +689,7 @@ static inline void calc_colorz(const char *color, char *final, size_t len, calcu #define COLOR_STRING_SIZE 100 void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) { - char label_buffer[LABEL_STRING_SIZE + 1] - , value_color_buffer[COLOR_STRING_SIZE + 1] + char value_color_buffer[COLOR_STRING_SIZE + 1] , value_string[VALUE_STRING_SIZE + 1] , label_escaped[LABEL_STRING_SIZE + 1] , value_escaped[VALUE_STRING_SIZE + 1] @@ -831,14 +709,11 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value); format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision); - // we need to copy the label, since verdana11_width may write to it - strncpyz(label_buffer, label, LABEL_STRING_SIZE); - - label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2); - value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2); + label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2); + value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2); total_width = label_width + value_width; - escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE); + escape_xmlz(label_escaped, label, LABEL_STRING_SIZE); escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE); escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE); escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE); @@ -862,19 +737,43 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch "" "" "" - "" + "" "" "" - "" - "" - "" + "" + "" + "" "" "" 
- "%s" - "%s" - "%s" - "%s" + "%s" + "%s" + "%s" + "%s" "" + "" "", total_width, height, total_width, height, round_corner, @@ -885,7 +784,8 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch label_width / 2, ceil(height - text_offset), label_escaped, label_width / 2, ceil(height - text_offset - 1.0), label_escaped, label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped); + label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped, + BADGE_HORIZONTAL_PADDING ); } int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { diff --git a/web/api/health/README.md b/web/api/health/README.md index 66a80d5f6..0b4f79f38 100644 --- a/web/api/health/README.md +++ b/web/api/health/README.md @@ -50,35 +50,39 @@ From Netdata v1.16.0 and beyond, the configuration controlled via the API comman Specifically, the API allows you to: - Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log. - Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the netdata UI will show the alarms as active, but no notifications will be sent. - - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. + - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://localhost:19999/netdata.conf`: -```bash +``` [registry] # netdata management api key file = /var/lib/netdata/netdata.api.key ``` -You can access the API via GET requests, by adding the bearer token to an `Authorization` http header, like this: +You can access the API via GET requests, by adding the bearer token to an `Authorization` http header, like this: ``` -curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" ``` -The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled. +By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in netdata.conf within the [web] section. See [web server access lists](../../server/#access-lists) for more information. + +The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled. If you've configured and entered your token correclty, you should see the plain text response `All health checks and notifications are enabled`. ### Disable or silence all alarms If all you need is temporarily disable all health checks, then you issue the following before your maintenance period starts: + ``` -curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken" ``` + The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log. 
If you want the health checks to be running but to not receive any notifications during your maintenance period, you can instead use this: ``` -curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken" ``` Alarms may then still be raised and logged in netdata, so you'll be able to see them via the UI. @@ -86,44 +90,44 @@ Alarms may then still be raised and logged in netdata, so you'll be able to see Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the RESET command. ``` - curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" + curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" ``` ### Disable or silence specific alarms -If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used. +If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used. Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced. -- `DISABLE` : Set the mode to disable health checks. -- `SILENCE` : Set the mode to silence notifications. +- `DISABLE` : Set the mode to disable health checks. +- `SILENCE` : Set the mode to silence notifications. -You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well. -You will get a warning in the response, if a selector was added without a SILENCE/DISABLE command, or vice versa. +You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well. +You will get a warning in the response, if a selector was added without a SILENCE/DISABLE command, or vice versa. -Each request can specify a single alarm `selector`, with one or more `selection criteria`. -A single alarm will match a `selector` if all selection criteria match the alarm. +Each request can specify a single alarm `selector`, with one or more `selection criteria`. +A single alarm will match a `selector` if all selection criteria match the alarm. You can add as many selectors as you like. In essence, the rule is: IF (alarm matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command. To clear all selectors and reset the mode to default, use the `RESET` command. -The following example silences notifications for all the alarms with context=load: +The following example silences notifications for all the alarms with context=load: ``` -curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken" ``` -#### Selection criteria +#### Selection criteria -The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two). +The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). 
This means that you can create very powerful selectors (you will rarely need more than one or two). The accepted keys for the `selection criteria` are the following: -- `alarm` : The expression provided will match both `alarm` and `template` names. +- `alarm` : The expression provided will match both `alarm` and `template` names. - `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`. - `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`. - `hosts` : The hostnames that will need to match. - `families` : The alarm families. -You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. e.g. there is no reason to add `hosts: *`, if you want the criteria to be applied to alarms for all hosts. +You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. e.g. there is no reason to add `hosts: *`, if you want the criteria to be applied to alarms for all hosts. Example 1: Disable all health checks for context = `random` @@ -152,6 +156,7 @@ The command `LIST` was added in netdata v1.16.0 and returns a JSON with the curr ``` As an example, the following response shows that we have two silencers configured, one for an alarm called `samplealarm` and one for alarms with context `random` on host `myhost` + ``` json { @@ -178,7 +183,7 @@ json "type": "DISABLE", "silencers": [] } - +``` ### Responses diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c index 468054c67..94293dbe6 100644 --- a/web/api/health/health_cmdapi.c +++ b/web/api/health/health_cmdapi.c @@ -179,6 +179,7 @@ int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, c silencer = health_silencers_addparam(silencer, key, value); } } + if (likely(silencer)) { health_silencers_add(silencer); buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED); diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c index 7c0d728bf..2273224bb 100644 --- a/web/api/web_api_v1.c +++ b/web/api/web_api_v1.c @@ -797,23 +797,23 @@ inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char * } // get the command - char *tok = mystrsep(&url, "?"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, tok); - uint32_t hash = simple_hash(tok); + if(url) { + debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, url); + uint32_t hash = simple_hash(url); for(i = 0; api_commands[i].command ;i++) { - if(unlikely(hash == api_commands[i].hash && !strcmp(tok, api_commands[i].command))) { + if(unlikely(hash == api_commands[i].hash && !strcmp(url, api_commands[i].command))) { if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl)) return web_client_permission_denied(w); - return api_commands[i].callback(host, w, url); + //return api_commands[i].callback(host, w, url); + return api_commands[i].callback(host, w, (w->decoded_query_string + 1)); } } buffer_flush(w->response.data); buffer_strcat(w->response.data, "Unsupported v1 API command: "); - buffer_strcat_htmlescape(w->response.data, tok); + buffer_strcat_htmlescape(w->response.data, url); return 404; } else { diff --git a/web/gui/console.html b/web/gui/console.html index 942c8c3cd..9b172644e 100644 --- a/web/gui/console.html +++ b/web/gui/console.html @@ -10,7 +10,7 @@ - + 
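The `web_api_v1.c` hunk above changes the v1 dispatch: the command token is now the already-isolated URL path, and each callback receives the decoded query string (skipping its leading `?`) instead of the remainder of the URL. The following self-contained sketch shows the same hash-then-compare dispatch pattern; all names are illustrative, and it is not the Netdata source:

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef int (*api_callback)(const char *query_string);

struct api_command {
    const char  *command;
    uint32_t     hash;      // pre-computed hash, so strcmp() only runs on hash matches
    api_callback callback;
};

// toy stand-in for Netdata's simple_hash()
static uint32_t toy_hash(const char *s) {
    uint32_t h = 5381;
    while(*s) h = ((h << 5) + h) + (uint32_t)*s++;
    return h;
}

static int cb_info(const char *qs) { printf("info, query '%s'\n", qs); return 200; }
static int cb_data(const char *qs) { printf("data, query '%s'\n", qs); return 200; }

static struct api_command api_commands[] = {
    { "info", 0, cb_info },
    { "data", 0, cb_data },
    { NULL,   0, NULL    }
};

// 'path' is the command after /api/v1/, 'query' the decoded query string without '?'
static int api_v1_dispatch(const char *path, const char *query) {
    uint32_t hash = toy_hash(path);
    for(int i = 0; api_commands[i].command; i++) {
        if(!api_commands[i].hash)
            api_commands[i].hash = toy_hash(api_commands[i].command);
        if(hash == api_commands[i].hash && !strcmp(path, api_commands[i].command))
            return api_commands[i].callback(query);
    }
    return 404; // unsupported command
}

int main(void) {
    return api_v1_dispatch("data", "chart=system.cpu&after=-10") == 200 ? 0 : 1;
}
```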
diff --git a/web/gui/infographic.html b/web/gui/infographic.html index b3112781b..24ff8f4e6 100644 --- a/web/gui/infographic.html +++ b/web/gui/infographic.html @@ -9,13 +9,13 @@ - + - + @@ -91,8 +91,8 @@ "toolbar":"", "auto-fit":true, "check-visible-state":false, - "edit":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml", - "url":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml" + "edit":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml", + "url":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml" }; document.getElementById("drawing").dataset.mxgraph = JSON.stringify(opts); diff --git a/web/gui/main.css b/web/gui/main.css index 2ddb776e5..b6ba95910 100644 --- a/web/gui/main.css +++ b/web/gui/main.css @@ -151,12 +151,6 @@ body.modal-open { /*width: 220px;*/ } -/* -.affix-top { - width: 220px; -} -*/ - .dashboard-sidebar { max-height: calc(100% - 70px) !important; overflow-y: auto; @@ -168,12 +162,6 @@ body.modal-open { position: static; } -@media (min-width: 768px) { - .dashboard-sidebar { - padding-left: 20px; - } -} - /* First level of nav */ .dashboard-sidenav { margin-top: 20px; @@ -353,146 +341,6 @@ body.modal-open { user-select: none; } -@media print { - body { - overflow: visible !important; - -webkit-print-color-adjust: exact; - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .dashboard-section { - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .dashboard-subsection { - page-break-before: avoid; - page-break-after: auto; - page-break-inside: auto; - } - - .charts-body { - padding-left: 0%; - padding-right: 0%; - display: block; - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .back-to-top, - .dashboard-theme-toggle { - display: block; - } -} - -@media (min-width: 768px) { - .charts-body { - padding-left: 0%; - padding-right: 0%; - } - - .back-to-top, - .dashboard-theme-toggle { - display: block; - } -} - -/* Show and affix the side nav when space allows it */ -@media (min-width: 992px) { - .container { - padding-left: 0% !important; - } - - .charts-body { - width: calc(100% - 213px) !important; - padding-left: 1% !important; - padding-right: 0% !important; - } - - .sidebar-body { - display: inline-block !important; - width: 213px !important; - } - - .dashboard-sidebar .nav > .active > ul { - display: block; - } - - /* Widen the fixed sidebar */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 213px !important; - } - - .dashboard-sidebar.affix { - position: fixed; /* Undo the static from mobile first approach */ - top: 20px; - } - - .dashboard-sidebar.affix-bottom { - position: absolute; /* Undo the static from mobile first approach */ - } - - .dashboard-sidebar.affix-bottom .dashboard-sidenav, - .dashboard-sidebar.affix .dashboard-sidenav { - margin-top: 0; - margin-bottom: 0; - } -} - -@media (min-width: 1200px) { - .container { - padding-left: 2% !important; - } - - .charts-body { - width: calc(100% - 233px) !important; - padding-left: 1% !important; - padding-right: 1% !important; - } - - .sidebar-body { - display: inline-block !important; - width: 233px !important; - } - - /* Widen the fixed sidebar again */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 233px !important; - } -} - -@media (min-width: 
1360px) { - .container { - padding-left: 3% !important; - } - - .charts-body { - width: calc(100% - 263px) !important; - padding-left: 1% !important; - padding-right: 2% !important; - } - - .sidebar-body { - display: inline-block !important; - width: 263px !important; - } - - /* Widen the fixed sidebar again */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 263px !important; - } -} - .action-button { position: relative; display: inline-block; @@ -664,8 +512,23 @@ body.modal-open { right: 19px; } +#myNetdataDropdownParent { + float: left; +} + #hostname { font-size: 18px; + overflow: hidden; + text-overflow: ellipsis; + max-width: 220px; + } + + #hostnametext { + white-space: pre; + float: left; + text-overflow: ellipsis; + overflow: hidden; + max-width: 160px; } .sign-in-btn { @@ -718,3 +581,180 @@ body.modal-open { .beta { color:#FFCC00; } + + +@media (min-width: 1400px) { + #hostname { + max-width: 600px !important; + } + + #hostnametext { + max-width: 540px !important; + } +} + +@media (min-width: 1360px) { + .container { + padding-left: 3% !important; + } + + #hostname { + max-width: 280px !important; + } + + #hostnametext { + max-width: 220px !important; + } + + .charts-body { + width: calc(100% - 263px) !important; + padding-left: 1% !important; + padding-right: 2% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 263px !important; + } + + /* Widen the fixed sidebar again */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 263px !important; + } +} + +@media (min-width: 1200px) { + #hostname { + max-width: 100px; + } + + #hostnametext { + max-width: 40px; + } + .container { + padding-left: 2% !important; + } + + + .charts-body { + width: calc(100% - 233px) !important; + padding-left: 1% !important; + padding-right: 1% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 233px !important; + } + + /* Widen the fixed sidebar again */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 233px !important; + } +} + +@media (min-width: 992px) { + .container { + padding-left: 0% !important; + } + + .charts-body { + width: calc(100% - 213px) !important; + padding-left: 1% !important; + padding-right: 0% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 213px !important; + } + + .dashboard-sidebar .nav > .active > ul { + display: block; + } + + /* Widen the fixed sidebar */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 213px !important; + } + + .dashboard-sidebar.affix { + position: fixed; /* Undo the static from mobile first approach */ + top: 20px; + } + + .dashboard-sidebar.affix-bottom { + position: absolute; /* Undo the static from mobile first approach */ + } + + .dashboard-sidebar.affix-bottom .dashboard-sidenav, + .dashboard-sidebar.affix .dashboard-sidenav { + margin-top: 0; + margin-bottom: 0; + } +} + +@media (min-width: 860px) { + .dashboard-sidebar { + padding-left: 20px; + } + +} + +@media (min-width: 768px) { + .dashboard-sidebar { + padding-left: 20px; + } + + .charts-body { + padding-left: 0%; + padding-right: 0%; + } + + .back-to-top, + .dashboard-theme-toggle { + display: block; + } +} + +@media print { + body { + overflow: visible !important; + -webkit-print-color-adjust: exact; + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + 
.dashboard-section { + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + .dashboard-subsection { + page-break-before: avoid; + page-break-after: auto; + page-break-inside: auto; + } + + .charts-body { + padding-left: 0%; + padding-right: 0%; + display: block; + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + .back-to-top, + .dashboard-theme-toggle { + display: block; + } +} diff --git a/web/gui/main.js b/web/gui/main.js index 65c4d4a88..1214eba6f 100644 --- a/web/gui/main.js +++ b/web/gui/main.js @@ -704,11 +704,11 @@ function restrictMyNetdataMenu() { `); } -function openAuthenticatedUrl(url) { +function openAuthenticatedUrl(url) { if (isSignedIn()) { window.open(url); } else { - window.open(`${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}`); + window.open(`${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}&redirectUrl=${encodeURIComponent(window.location.origin + "/" + url)}`); } } @@ -1775,8 +1775,6 @@ function renderPage(menus, data) { if (urlOptions.mode === 'print') { chtml += ''; } - - // console.log(' \------- ' + chart.id + ' (' + chart.priority + '): ' + chart.context + ' height: ' + menus[menu].submenus[submenu].height); } head += ''; @@ -2747,7 +2745,7 @@ function initializeDynamicDashboardWithData(data) { } // update the dashboard hostname - document.getElementById('hostname').innerHTML = options.hostname + ((netdataSnapshotData !== null) ? ' (snap)' : '').toString() + '  '; + document.getElementById('hostname').innerHTML = '' + options.hostname + ((netdataSnapshotData !== null) ? 
' (snap)' : '').toString() + '  ';
     document.getElementById('hostname').href = NETDATA.serverDefault;
     document.getElementById('netdataVersion').innerHTML = options.version;
@@ -4899,6 +4897,9 @@ function handleSignInMessage(e) {
 
     cloudToken = e.data.token;
     netdataRegistryCallback(registryAgents);
+    if (e.data.redirectUrl) {
+        window.location.replace(e.data.redirectUrl);
+    }
 }
 
 function handleSignOutMessage(e) {
diff --git a/web/gui/src/dashboard.js/main.js b/web/gui/src/dashboard.js/main.js
index 13f3b4c7d..564ee7d4e 100644
--- a/web/gui/src/dashboard.js/main.js
+++ b/web/gui/src/dashboard.js/main.js
@@ -3071,10 +3071,10 @@ let chartState = function (element) {
     };
 
     this.chartDataUniqueID = function () {
-        return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions();
+        return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(true);
    };
 
-    this.chartURLOptions = function () {
+    this.chartURLOptions = function (isForUniqueId) {
         let ret = '';
 
         if (this.override_options !== null) {
@@ -3089,7 +3089,9 @@
 
         ret += '%7C' + 'jsonwrap';
 
-        if (NETDATA.options.current.eliminate_zero_dimensions) {
+        // always add `nonzero` when it's used to create a chartDataUniqueID
+        // we cannot just remove `nonzero` because of backwards compatibility with old snapshots
+        if (isForUniqueId || NETDATA.options.current.eliminate_zero_dimensions) {
             ret += '%7C' + 'nonzero';
         }
 
diff --git a/web/server/README.md b/web/server/README.md
index df29f331f..173e89596 100644
--- a/web/server/README.md
+++ b/web/server/README.md
@@ -59,42 +59,43 @@ The API requests are serviced as follows:
 
 ### Enabling TLS support
 
+Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data between a slave and its master, via the TLS 1.2 protocol.
-Netdata since version 1.16 supports encrypted HTTP connections to the web server and encryption of the data stream between a slave and a master.
-Inbound unix socket connections are unaffected, regardless of the SSL settings.
-To enable SSL, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
+Inbound unix socket connections are unaffected, regardless of the TLS settings.
+
+??? info "Differences in TLS and SSL terminology"
+    While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`.
-```
+To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
+
+``` conf
 [web]
 	ssl key = /etc/netdata/ssl/key.pem
 	ssl certificate = /etc/netdata/ssl/cert.pem
 ```
 
-Both files must be readable by the netdata user. If any of the two files does not exist or is unreadable, Netdata falls back to HTTP.
-
-For a master/slave connection, only the master needs these settings.
+Both files must be readable by the `netdata` user. If either of these files does not exist or is unreadable, Netdata will fall back to HTTP. For a master/slave connection, only the master needs these settings.
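A minimal sketch of the fall-back rule just described, using the OpenSSL 1.1+ API. This is not Netdata's actual implementation; `try_enable_tls` is a hypothetical helper, and only the two certificate paths come from the configuration above:

```c
#include <stdio.h>
#include <openssl/ssl.h>
#include <openssl/err.h>

// Returns a configured TLS context, or NULL when the key/certificate
// files are missing or unreadable, in which case the caller serves HTTP.
static SSL_CTX *try_enable_tls(const char *key, const char *cert) {
    SSL_CTX *ctx = SSL_CTX_new(TLS_server_method());
    if(!ctx) return NULL;

    if(SSL_CTX_use_certificate_file(ctx, cert, SSL_FILETYPE_PEM) != 1 ||
       SSL_CTX_use_PrivateKey_file(ctx, key, SSL_FILETYPE_PEM)  != 1 ||
       SSL_CTX_check_private_key(ctx)                           != 1) {
        ERR_print_errors_fp(stderr); // log why TLS could not be enabled
        SSL_CTX_free(ctx);
        return NULL;                 // fall back to plain HTTP
    }
    return ctx;
}

int main(void) {
    SSL_CTX *ctx = try_enable_tls("/etc/netdata/ssl/key.pem", "/etc/netdata/ssl/cert.pem");
    printf("%s\n", ctx ? "serving HTTPS" : "falling back to HTTP");
    if(ctx) SSL_CTX_free(ctx);
    return 0;
}
```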
 For test purposes, you can generate self-signed certificates with the following command:
 
-```
+``` bash
 $ openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem
 ```
 
-TIP: If you use 4096 bits for the key and the certificate, netdata will need more CPU to process the whole communication.
-rsa4096 can be until 4 times slower than rsa2048, so we recommend using 2048 bits. You can verify the difference by running
-
-```
-$ openssl speed rsa2048 rsa4096
-```
+!!! note
+    If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication. `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. You can verify the difference by running:
+
+    ```
+    $ openssl speed rsa2048 rsa4096
+    ```
 
-#### SSL enforcement
+#### TLS/SSL enforcement
 
 When the certificates are defined and unless any other options are provided, a Netdata server will:
+
 - Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, netdata.conf and badges.
 - Allow incoming slave connections to use both unencrypted and encrypted communications for streaming.
 
-To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`.
-At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
+To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
 
 SSL setting | HTTP requests | HTTPS requests | Unencrypted Streams | Encrypted Streams
 :------:|:-----:|:-----:|:-----:|:--------
@@ -109,12 +110,29 @@ Example:
 
 bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force
 ```
 
-For information how to configure the slaves to use TLS, check [securing the communication](../../streaming#securing-the-communication) in the streaming documentation.
-You will find there additional details on the expected behavior for client and server nodes, when their respective SSL options are enabled.
+For information on how to configure the slaves to use TLS, check [securing the communication](../../streaming#securing-streaming-communications) in the streaming documentation. There you will find additional details on the expected behavior for client and server nodes, when their respective TLS options are enabled.
+
+When you define different SSL settings for different ports, Netdata will apply the behavior specified on each port. For example, using the configuration line below (a sketch of how such a `^SSL=` suffix can be parsed follows the list):
+
+```
+[web]
+    bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force *:20000=netdata.conf^SSL=optional *:20001=dashboard|registry
+```
+
+Netdata will:
+
+- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
+- Refuse unencrypted streaming connections from slaves on the default port.
+- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf`.
+- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
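The following is a small, illustrative parser for the `^SSL=` suffix syntax documented above. The function and enum names are hypothetical; Netdata's real option parsing lives in the daemon:

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>

typedef enum { SSL_MODE_OPTIONAL, SSL_MODE_FORCE, SSL_MODE_DEFAULT } ssl_mode;

// Splits "host:port=services^SSL=force" into the bind definition and an SSL
// mode; the '^...' suffix is stripped from 'bind_def' in place.
static ssl_mode parse_ssl_suffix(char *bind_def) {
    char *caret = strrchr(bind_def, '^');
    if(!caret) return SSL_MODE_DEFAULT;

    *caret = '\0'; // terminate the bind definition before the suffix
    const char *opt = caret + 1;

    if(!strcasecmp(opt, "SSL=force"))    return SSL_MODE_FORCE;
    if(!strcasecmp(opt, "SSL=optional")) return SSL_MODE_OPTIONAL;
    return SSL_MODE_DEFAULT; // unknown suffix: keep the default behavior
}

int main(void) {
    char def[] = "*:20000=netdata.conf^SSL=optional";
    ssl_mode mode = parse_ssl_suffix(def);
    printf("'%s' -> mode %d\n", def, mode); // prints: '*:20000=netdata.conf' -> mode 0
    return 0;
}
```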
+ +#### TLS/SSL errors + +When you start using Netdata with TLS, you may find errors in the Netdata log, which is stored at `/var/log/netdata/error.log` by default. -#### SSL error +Most of the time, these errors are due to incompatibilities between your browser's options related to TLS/SSL protocols and Netdata's internal configuration. The most common error is `error:00000006:lib(0):func(0):EVP lib`. -It is possible that when you start to use the Netdata with SSL some erros will be register in the logs, this happens due possible incompatibilities between the browser options related to SSL like Ciphers and TLS/SSL version and the Netdata internal configuration. The most common error would be `error:00000006:lib(0):func(0):EVP lib`. In a near future the Netdata will allow our users to change the internal configuration to avoid errors like this, but until there we are setting the most common and safety options to the communication. +In the near future, Netdata will allow our users to change the internal configuration to avoid similar errors. Until then, we're recommending only the most common and safe encryption protocols, which you can find above. ### Access lists diff --git a/web/server/web_client.c b/web/server/web_client.c index bd275f5e5..2da6c1dec 100644 --- a/web/server/web_client.c +++ b/web/server/web_client.c @@ -16,8 +16,8 @@ inline int web_client_permission_denied(struct web_client *w) { w->response.data->contenttype = CT_TEXT_PLAIN; buffer_flush(w->response.data); buffer_strcat(w->response.data, "You are not allowed to access this resource."); - w->response.code = 403; - return 403; + w->response.code = HTTP_RESP_FORBIDDEN; + return HTTP_RESP_FORBIDDEN; } static inline int web_client_crock_socket(struct web_client *w) { @@ -337,7 +337,7 @@ static inline int access_to_file_is_not_permitted(struct web_client *w, const ch w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Access to file is not permitted: "); buffer_strcat_htmlescape(w->response.data, filename); - return 403; + return HTTP_RESP_FORBIDDEN; } int mysendfile(struct web_client *w, char *filename) { @@ -357,7 +357,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_sprintf(w->response.data, "Filename contains invalid characters: "); buffer_strcat_htmlescape(w->response.data, filename); - return 400; + return HTTP_RESP_BAD_REQUEST; } } @@ -367,7 +367,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Relative filenames are not supported: "); buffer_strcat_htmlescape(w->response.data, filename); - return 400; + return HTTP_RESP_BAD_REQUEST; } // find the physical file on disk @@ -383,7 +383,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "File does not exist, or is not accessible: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; + return HTTP_RESP_NOT_FOUND; } if ((statbuf.st_mode & S_IFMT) == S_IFDIR) { @@ -422,14 +422,14 @@ int mysendfile(struct web_client *w, char *filename) { buffer_sprintf(w->response.header, "Location: /%s\r\n", filename); buffer_strcat(w->response.data, "File is currently busy, please try again later: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 307; + return HTTP_RESP_REDIR_TEMP; } else { error("%llu: Cannot open file '%s'.", w->id, webfilename); w->response.data->contenttype = 
CT_TEXT_HTML; buffer_strcat(w->response.data, "Cannot open file: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; + return HTTP_RESP_NOT_FOUND; } } @@ -451,7 +451,7 @@ int mysendfile(struct web_client *w, char *filename) { #endif /* __APPLE__ */ buffer_cacheable(w->response.data); - return 200; + return HTTP_RESP_OK; } @@ -570,7 +570,7 @@ static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char //if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) { // buffer_flush(w->response.data); // buffer_strcat(w->response.data, "This host does not maintain a database"); - // return 400; + // return HTTP_RESP_BAD_REQUEST; //} return func(host, w, url); @@ -603,13 +603,13 @@ int web_client_api_request(RRDHOST *host, struct web_client *w, char *url) w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Unsupported API version: "); buffer_strcat_htmlescape(w->response.data, tok); - return 404; + return HTTP_RESP_NOT_FOUND; } } else { buffer_flush(w->response.data); buffer_sprintf(w->response.data, "Which API version?"); - return 400; + return HTTP_RESP_BAD_REQUEST; } } @@ -687,25 +687,25 @@ const char *web_content_type_to_string(uint8_t contenttype) { const char *web_response_code_to_string(int code) { switch(code) { - case 200: + case HTTP_RESP_OK: return "OK"; - case 301: + case HTTP_RESP_MOVED_PERM: return "Moved Permanently"; - case 307: + case HTTP_RESP_REDIR_TEMP: return "Temporary Redirect"; - case 400: + case HTTP_RESP_BAD_REQUEST: return "Bad Request"; - case 403: + case HTTP_RESP_FORBIDDEN: return "Forbidden"; - case 404: + case HTTP_RESP_NOT_FOUND: return "Not Found"; - case 412: + case HTTP_RESP_PRECOND_FAIL: return "Preconditions Failed"; default: @@ -772,7 +772,6 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u // terminate the value *ve = '\0'; - // fprintf(stderr, "HEADER: '%s' = '%s'\n", s, v); uint32_t hash = simple_uhash(s); if(hash == hash_origin && !strcasecmp(s, "Origin")) @@ -812,65 +811,35 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u return ve; } -// http_request_validate() -// returns: -// = 0 : all good, process the request -// > 0 : request is not supported -// < 0 : request is incomplete - wait for more data - -typedef enum { - HTTP_VALIDATION_OK, - HTTP_VALIDATION_NOT_SUPPORTED, -#ifdef ENABLE_HTTPS - HTTP_VALIDATION_INCOMPLETE, - HTTP_VALIDATION_REDIRECT -#else - HTTP_VALIDATION_INCOMPLETE -#endif -} HTTP_VALIDATION; - -static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { - char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL; - - size_t last_pos = w->header_parse_last_size; - if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n - else last_pos = 0; - - w->header_parse_tries++; - w->header_parse_last_size = buffer_strlen(w->response.data); - - if(w->header_parse_tries > 1) { - if(w->header_parse_last_size < last_pos) - last_pos = 0; - - if(strstr(&s[last_pos], "\r\n\r\n") == NULL) { - if(w->header_parse_tries > 10) { - info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data)); - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - web_client_disable_wait_receive(w); - return HTTP_VALIDATION_NOT_SUPPORTED; - } - - return HTTP_VALIDATION_INCOMPLETE; - } - } - +/** + * Valid Method + * + * Netdata accepts only three methods, including one of these three(STREAM) is an internal 
method.
+ *
+ * @param w is the structure with the client request
+ * @param s is the start string to parse
+ *
+ * @return It returns the next address to parse in case the method is valid, and NULL otherwise.
+ */
+static inline char *web_client_valid_method(struct web_client *w, char *s) {
     // is this a valid request?
     if(!strncmp(s, "GET ", 4)) {
-        encoded_url = s = &s[4];
+        s = &s[4];
         w->mode = WEB_CLIENT_MODE_NORMAL;
     }
     else if(!strncmp(s, "OPTIONS ", 8)) {
-        encoded_url = s = &s[8];
+        s = &s[8];
         w->mode = WEB_CLIENT_MODE_OPTIONS;
     }
     else if(!strncmp(s, "STREAM ", 7)) {
+        s = &s[7];
+
 #ifdef ENABLE_HTTPS
-        if ( (w->ssl.flags) && (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE)){
+        if (w->ssl.flags && web_client_is_using_ssl_force(w)){
             w->header_parse_tries = 0;
             w->header_parse_last_size = 0;
             web_client_disable_wait_receive(w);
+
             char hostname[256];
             char *copyme = strstr(s,"hostname=");
             if ( copyme ){
@@ -891,29 +860,150 @@
                 hostname[13] = 0x00;
             }
             error("The server is configured to always use encrypted connections; please enable SSL on the slave with hostname '%s'.",hostname);
-            return HTTP_VALIDATION_NOT_SUPPORTED;
+            s = NULL;
         }
 #endif
-        encoded_url = s = &s[7];
         w->mode = WEB_CLIENT_MODE_STREAM;
     }
     else {
+        s = NULL;
+    }
+
+    return s;
+}
+
+/**
+ * Set Path Query
+ *
+ * Set the pointers to the path and query string according to the input.
+ *
+ * @param w is the structure with the client request
+ * @param s is the first address of the string.
+ * @param ptr is the address of the separator.
+ */
+static void web_client_set_path_query(struct web_client *w, char *s, char *ptr) {
+    w->url_path_length = (size_t)(ptr - s);
+
+    w->url_search_path = ptr;
+}
+
+/**
+ * Split path query
+ *
+ * Separate the path from the query string.
+ *
+ * @param w is the structure with the client request
+ * @param s is the string to parse
+ */
+void web_client_split_path_query(struct web_client *w, char *s) {
+    // assume here that the separator character ('?') is not encoded
+    char *ptr = strchr(s, '?');
+    if(ptr) {
+        w->separator = '?';
+        web_client_set_path_query(w, s, ptr);
+        return;
+    }
+
+    // test the second possibility: the URL was completely encoded by the user.
+    // We are not using strcasestr, because it is faster to check for %3f and compare
+    // the next character.
+    // We executed some tests with "encodeURI(uri);" described in https://www.w3schools.com/jsref/jsref_encodeuri.asp
+    // on July 1st, 2019, which showed that URLs won't have '?', '=' and '&' encoded, but we decided to handle
+    // this case anyway, because users may use their own encoder that does not follow this rule.
+    char *moveme = s;
+    while (moveme) {
+        ptr = strchr(moveme, '%');
+        if(ptr) {
+            char *test = (ptr+1);
+            if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) {
+                w->separator = *ptr;
+                web_client_set_path_query(w, s, ptr);
+                return;
+            }
+            ptr++;
+        }
+
+        moveme = ptr;
+    }
+
+    w->separator = 0x00;
+    w->url_path_length = strlen(s);
+    w->url_search_path = NULL;
+}
+
+/**
+ * Request validate
+ *
+ * @param w is the structure with the client request
+ *
+ * @return It returns HTTP_VALIDATION_OK on success and another code present
+ *         in the enum HTTP_VALIDATION otherwise.
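+ *
+ * The overall flow: first make sure the whole request header has been
+ * received (it must end with \r\n\r\n), then check the method with
+ * web_client_valid_method(), and only then split and decode the path and
+ * the query string with web_client_split_path_query().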
+ */ +static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { + char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL; + + size_t last_pos = w->header_parse_last_size; + + w->header_parse_tries++; + w->header_parse_last_size = buffer_strlen(w->response.data); + + int is_it_valid; + if(w->header_parse_tries > 1) { + if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n + else last_pos = 0; + + if(w->header_parse_last_size < last_pos) + last_pos = 0; + + is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size); + if(!is_it_valid) { + if(w->header_parse_tries > 10) { + info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data)); + w->header_parse_tries = 0; + w->header_parse_last_size = 0; + web_client_disable_wait_receive(w); + return HTTP_VALIDATION_NOT_SUPPORTED; + } + + return HTTP_VALIDATION_INCOMPLETE; + } + + is_it_valid = 1; + } else { + last_pos = w->header_parse_last_size; + is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size); + } + + s = web_client_valid_method(w, s); + if (!s) { w->header_parse_tries = 0; w->header_parse_last_size = 0; web_client_disable_wait_receive(w); + return HTTP_VALIDATION_NOT_SUPPORTED; + } else if (!is_it_valid) { + //Invalid request, we have more data after the end of message + char *check = strstr((char *)buffer_tostring(w->response.data), "\r\n\r\n"); + if(check) { + check += 4; + if (*check) { + w->header_parse_tries = 0; + w->header_parse_last_size = 0; + web_client_disable_wait_receive(w); + return HTTP_VALIDATION_NOT_SUPPORTED; + } + } + + web_client_enable_wait_receive(w); + return HTTP_VALIDATION_INCOMPLETE; } - // find the SPACE + "HTTP/" - while(*s) { - // find the next space - while (*s && *s != ' ') s++; + //After the method we have the path and query string together + encoded_url = s; - // is it SPACE + "HTTP/" ? 
- if(*s && !strncmp(s, " HTTP/", 6)) break; - else s++; - } + //we search for the position where we have " HTTP/", because it finishes the user request + s = url_find_protocol(s); // incomplete requests if(unlikely(!*s)) { @@ -924,6 +1014,10 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { // we have the end of encoded_url - remember it char *ue = s; + //Variables used to map the variables in the query string case it is present + int total_variables; + char *ptr_variables[WEB_FIELDS_MAX]; + // make sure we have complete request // complete requests contain: \r\n\r\n while(*s) { @@ -941,15 +1035,41 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { // a valid complete HTTP request found *ue = '\0'; - url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1); + if(w->mode != WEB_CLIENT_MODE_NORMAL) { + if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1)) + return HTTP_VALIDATION_MALFORMED_URL; + } else { + web_client_split_path_query(w, encoded_url); + + if (w->separator) { + *w->url_search_path = 0x00; + } + + if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1)) + return HTTP_VALIDATION_MALFORMED_URL; + + if (w->separator) { + *w->url_search_path = w->separator; + + char *from = (encoded_url + w->url_path_length); + total_variables = url_map_query_string(ptr_variables, from); + + if (url_parse_query_string(w->decoded_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1, ptr_variables, total_variables)) { + return HTTP_VALIDATION_MALFORMED_URL; + } + } + } *ue = ' '; - + // copy the URL - we are going to overwrite parts of it // TODO -- ideally we we should avoid copying buffers around strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE); + if (w->separator) { + *w->url_search_path = 0x00; + } #ifdef ENABLE_HTTPS if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) { - if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (netdata_use_ssl_on_http & NETDATA_SSL_FORCE) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) { + if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) { w->header_parse_tries = 0; w->header_parse_last_size = 0; web_client_disable_wait_receive(w); @@ -997,7 +1117,7 @@ static inline ssize_t web_client_send_data(struct web_client *w,const void *buf, } static inline void web_client_send_http_header(struct web_client *w) { - if(unlikely(w->response.code != 200)) + if(unlikely(w->response.code != HTTP_RESP_OK)) buffer_no_cacheable(w->response.data); // set a proper expiration date, if not already set @@ -1027,7 +1147,7 @@ static inline void web_client_send_http_header(struct web_client *w) { } char headerbegin[8328]; - if (w->response.code == 301) { + if (w->response.code == HTTP_RESP_MOVED_PERM) { memcpy(headerbegin,"\r\nLocation: https://",20); size_t headerlength = strlen(w->host); memcpy(&headerbegin[20],w->host,headerlength); @@ -1210,7 +1330,7 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch if(host != localhost) { buffer_flush(w->response.data); buffer_strcat(w->response.data, "Nesting of hosts is not allowed."); - return 400; + return HTTP_RESP_BAD_REQUEST; } char *tok = mystrsep(&url, "/"); @@ -1234,7 +1354,7 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch w->response.data->contenttype = CT_TEXT_HTML; 
CT_TEXT_HTML;
     buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
     buffer_strcat_htmlescape(w->response.data, tok?tok:"");
-    return 404;
+    return HTTP_RESP_NOT_FOUND;
 }
 
 static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) {
@@ -1279,7 +1399,7 @@
                 w->response.data->contenttype = CT_TEXT_PLAIN;
                 buffer_flush(w->response.data);
                 config_generate(w->response.data, 0);
-                return 200;
+                return HTTP_RESP_OK;
             }
 #ifdef NETDATA_INTERNAL_CHECKS
             else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
@@ -1296,7 +1416,7 @@
                 error("web request to exit received.");
                 netdata_cleanup_and_exit(0);
-                return 200;
+                return HTTP_RESP_OK;
             }
             else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
                 if(unlikely(!web_client_can_access_netdataconf(w)))
@@ -1317,7 +1437,7 @@
                     buffer_strcat(w->response.data, "Chart is not found: ");
                     buffer_strcat_htmlescape(w->response.data, tok);
                     debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
-                    return 404;
+                    return HTTP_RESP_NOT_FOUND;
                 }
 
                 debug_flags |= D_RRD_STATS;
@@ -1331,12 +1451,12 @@
                 buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
                 buffer_strcat_htmlescape(w->response.data, tok);
                 debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
-                return 200;
+                return HTTP_RESP_OK;
             }
 
             buffer_flush(w->response.data);
             buffer_strcat(w->response.data, "debug which chart?\r\n");
-            return 400;
+            return HTTP_RESP_BAD_REQUEST;
         }
         else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
             if(unlikely(!web_client_can_access_netdataconf(w)))
@@ -1350,7 +1470,7 @@
 
             // just leave the buffer as is
             // it will be copied back to the client
-            return 200;
+            return HTTP_RESP_OK;
         }
 #endif  /* NETDATA_INTERNAL_CHECKS */
     }
@@ -1395,7 +1515,7 @@ void web_client_process_request(struct web_client *w) {
                     w->response.data->contenttype = CT_TEXT_PLAIN;
                     buffer_flush(w->response.data);
                     buffer_strcat(w->response.data, "OK");
-                    w->response.code = 200;
+                    w->response.code = HTTP_RESP_OK;
                     break;
 
                 case WEB_CLIENT_MODE_FILECOPY:
@@ -1424,7 +1544,7 @@
                 buffer_flush(w->response.data);
                 buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len);
-                w->response.code = 400;
+                w->response.code = HTTP_RESP_BAD_REQUEST;
             }
             else {
                 // wait for more data
@@ -1437,16 +1557,23 @@
             buffer_flush(w->response.data);
             w->response.data->contenttype = CT_TEXT_HTML;
             buffer_strcat(w->response.data, "Redirecting to a secure connection; in case your browser does not support redirection, please click here.");
-            w->response.code = 301;
+            w->response.code = HTTP_RESP_MOVED_PERM;
             break;
         }
 #endif
+        case HTTP_VALIDATION_MALFORMED_URL:
+            debug(D_WEB_CLIENT_ACCESS, "%llu: URL parsing failed (malformed URL). Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+            buffer_flush(w->response.data);
+            buffer_strcat(w->response.data, "URL not valid. I don't understand you...\r\n");
+            w->response.code = HTTP_RESP_BAD_REQUEST;
+            break;
         case HTTP_VALIDATION_NOT_SUPPORTED:
             debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer);
 
             buffer_flush(w->response.data);
             buffer_strcat(w->response.data, "I don't understand you...\r\n");
-            w->response.code = 400;
+            w->response.code = HTTP_RESP_BAD_REQUEST;
             break;
     }
 
diff --git a/web/server/web_client.h b/web/server/web_client.h
index 0a57e8d8e..7cab46fc2 100644
--- a/web/server/web_client.h
+++ b/web/server/web_client.h
@@ -11,6 +11,21 @@ extern int web_enable_gzip, web_gzip_strategy;
 #endif /* NETDATA_WITH_ZLIB */
 
+// HTTP_CODES 2XX Success
+#define HTTP_RESP_OK 200
+
+// HTTP_CODES 3XX Redirections
+#define HTTP_RESP_MOVED_PERM 301
+#define HTTP_RESP_REDIR_TEMP 307
+#define HTTP_RESP_REDIR_PERM 308
+
+// HTTP_CODES 4XX Client Errors
+#define HTTP_RESP_BAD_REQUEST 400
+#define HTTP_RESP_FORBIDDEN 403
+#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_PRECOND_FAIL 412
+
+
 extern int respect_web_browser_do_not_track_policy;
 extern char *web_x_frame_options;
 
@@ -21,6 +36,18 @@ typedef enum web_client_mode {
     WEB_CLIENT_MODE_STREAM = 3
 } WEB_CLIENT_MODE;
 
+typedef enum {
+    HTTP_VALIDATION_OK,
+    HTTP_VALIDATION_NOT_SUPPORTED,
+    HTTP_VALIDATION_MALFORMED_URL,
+#ifdef ENABLE_HTTPS
+    HTTP_VALIDATION_INCOMPLETE,
+    HTTP_VALIDATION_REDIRECT
+#else
+    HTTP_VALIDATION_INCOMPLETE
+#endif
+} HTTP_VALIDATION;
+
 typedef enum web_client_flags {
     WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
 
@@ -128,8 +155,12 @@ struct web_client {
     char client_port[NI_MAXSERV+1];
 
     char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1];  // we decode the URL in this buffer
+    char decoded_query_string[NETDATA_WEB_REQUEST_URL_SIZE + 1];  // we decode the Query String in this buffer
     char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1];       // we keep a copy of the decoded URL here
     char host[256];
+    size_t url_path_length;
+    char separator;        // either '?', or '%' when the '?' arrived percent-encoded (%3f)
+    char *url_search_path; // a pointer to the search path sent by the client
 
     struct timeval tv_in, tv_ready;
 
@@ -159,6 +190,7 @@ struct web_client {
 #endif
 };
 
+
 extern uid_t web_files_uid(void);
 extern uid_t web_files_gid(void);
 
--
cgit v1.2.3