Diffstat
149 files changed, 5761 insertions, 2260 deletions
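Context for the CI hunks below: the `.travis.yml` changes gate the packaging stages on commit-message patterns, and the `.travis/README.md` hunk documents that a package build is triggered manually with an empty commit whose message matches one of those patterns. A minimal sketch of such a trigger, assuming you have push access to the target repository (the reasoning text is a placeholder; the `[Package amd64 DEB]` pattern itself comes from the stage conditionals in this diff):

    # Hypothetical trigger: an empty commit on master whose message matches
    # the "[Package amd64 DEB]" conditionals in the packaging stages below.
    git checkout master
    git commit --allow-empty -m "[Package amd64 DEB] rebuild Debian/Ubuntu packages"
    git push origin master

Per the stage conditionals, such commits only take effect on the master branch of non-cron, non-pull-request builds.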
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7d5dfa8b..2b4e0d16 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,33 +13,33 @@ backends/json/ @thiagoftsm @vlvkobal backends/opentsdb/ @thiagoftsm @vlvkobal backends/prometheus/ @vlvkobal @paulkatsoulakis @thiagoftsm build/ @paulkatsoulakis @cakrit -collectors/ @vlvkobal @cakrit +collectors/ @vlvkobal @mfundul @cakrit collectors/charts.d.plugin/ @paulkatsoulakis @cakrit -collectors/freebsd.plugin/ @vlvkobal @cakrit -collectors/macos.plugin/ @vlvkobal @cakrit -collectors/node.d.plugin/ @gmosx @cakrit +collectors/freebsd.plugin/ @vlvkobal @thiagoftsm @cakrit +collectors/macos.plugin/ @vlvkobal @thiagoftsm @cakrit +collectors/node.d.plugin/ @VLegakis @cakrit collectors/node.d.plugin/fronius/ @ccremer @cakrit -collectors/node.d.plugin/snmp/ @gmosx @cakrit +collectors/node.d.plugin/snmp/ @VLegakis @cakrit collectors/node.d.plugin/stiebeleltron/ @ccremer @cakrit collectors/python.d.plugin/ @ilyam8 -collectors/cups.plugin/ @simonnagl @vlvkobal @cakrit +collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm @cakrit daemon/ @thiagoftsm @mfundul @cakrit -database/ @cakrit @mfundul -docs/ @cakrit -health/ @thiagoftsm @cakrit -health/health.d/ @thiagoftsm @cakrit -health/notifications/ @Ferroin @cakrit -libnetdata/ @thiagofsm @cakrit +database/ @mfundul @thiagoftsm @cakrit +docs/ @cakrit @joelhans +health/ @thiagoftsm @vlvkobal @cakrit +health/health.d/ @thiagoftsm @vlvkobal @cakrit +health/notifications/ @Ferroin @thiagoftsm @cakrit +libnetdata/ @thiagofsm @mfundul @cakrit packaging/ @paulkatsoulakis @cakrit packaging/installer/ @paulkatsoulakis @cakrit packaging/makeself/ @paulkatsoulakis @cakrit -registry/ @gmosx @cakrit -streaming/ @cakrit @thiagoftsm -web/ @thiagoftsm @cakrit -web/gui/ @gmosx @cakrit +registry/ @VLegakis @cakrit +streaming/ @thiagoftsm @vlvkobal @cakrit +web/ @thiagoftsm @mfundul @vlvkobal @cakrit +web/gui/ @VLegakis @cakrit # Ownership by filetype (overwrites ownership by directory) -*.md @cakrit +*.md @cakrit @joelhans *.am @paulkatsoulakis # Ownership of specific files @@ -54,8 +54,8 @@ web/gui/ @gmosx @cakrit netdata.spec.in @paulkatsoulakis netdata-installer.sh @paulkatsoulakis @cakrit netlify.toml @cakrit -package.json @gmosx +package.json @VLegakis packaging/version @netdatabot -LICENSE.md @cakrit +LICENSE.md @cakrit @joelhans CHANGELOG.md @netdatabot @@ -139,8 +139,6 @@ cmake_install.cmake .DS_Store webcopylocal* -contrib/debian/changelog - # converted diagrams diagrams/*.png diagrams/*.svg @@ -168,6 +166,8 @@ callgrind.out.* gmon.out gmon.txt sitespeed-result/ +tests/acls/acl.sh +tests/urls/request.sh # tests and temp files python.d/python-modules-installer.sh diff --git a/.travis.yml b/.travis.yml index cb9d7290..a239a0c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ dist: xenial sudo: true language: c services: -- docker + - docker @@ -16,18 +16,20 @@ matrix: # Install dependencies for all, once # install: -- sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python-pip -- sudo apt install -y --only-upgrade docker-ce -- sudo pip install git-semver -- docker info -- source tests/installer/slack.sh -- export NOTIF_CHANNEL="automation-beta" -- if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi; -- export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)" -- if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat 
packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi; -- export DEPLOY_REPO="netdata" # Default production packaging repository -- if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi; -- export PACKAGING_USER="$(echo ${TRAVIS_REPO_SLUG} | cut -d'/' -f1)" + - sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python-pip + - sudo apt install -y --only-upgrade docker-ce + - sudo pip install git-semver + - docker info + - source tests/installer/slack.sh + - export NOTIF_CHANNEL="automation-beta" + - if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi; + - export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)" + - export LATEST_RELEASE_VERSION="$(cat packaging/version | cut -d'-' -f1)" + - export LATEST_RELEASE_DATE="$(git log -1 --format=%aD "${LATEST_RELEASE_VERSION}" | cat)" + - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi; + - export DEPLOY_REPO="netdata" # Default production packaging repository + - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi; + - export PACKAGING_USER="$(echo ${TRAVIS_REPO_SLUG} | cut -d'/' -f1)" @@ -41,167 +43,148 @@ notifications: # Define the stage sequence and conditionals # stages: -# Mandatory runs, we always want these executed -- name: Code quality, linting, syntax, code style -- name: Build process -- name: Artifacts validation -- name: Artifacts validation on bare OS, stable to current lifecycle checks - if: branch = master AND (type = pull_request OR type = cron) - - # Nightly operations -- name: Nightly operations - if: branch = master AND type = cron -- name: Nightly release - if: branch = master AND type = cron - - # Scheduled releases -- name: Packaging for release - if: branch = master AND type != pull_request AND type != cron - -- name: Publish for release - if: branch = master AND type != pull_request AND type != cron AND commit_message =~ /(\[netdata release candidate\]|\[netdata major release\]|\[netdata minor release\]|\[netdata patch release\])/ - - # Build DEB packages under special conditions - # Ubuntu -- name: "Package ubuntu/disco" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/ -- name: "Package ubuntu/cosmic" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/ -- name: "Package ubuntu/bionic" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/ -- name: "Package ubuntu/artful" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package AMD64 DEB Ubuntu\]|\[Package AMD64 DEB\])/ - - # Debian -- name: "Package debian/buster" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package 
AMD64 DEB Debian\]|\[Package AMD64 DEB\])/ -- name: "Package debian/stretch" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/ -- name: "Package debian/jessie" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/ -- name: "Package debian/wheezy" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package AMD64 DEB Debian\]|\[Package AMD64 DEB\])/ - - # Build RPM packages under special conditions - # Enterprise linux (Covers CentOS, Redhat, Amazon linux) -- name: "Package Enterprise Linux 7" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Enterprise Linux\]|\[Package arm64 RPM\]|\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package AMD64 RPM Enterprise Linux\]|\[Package AMD64 RPM\])/ -- name: "Package Enterprise linux 6" - if: type != cron AND branch = master AND commit_message =~ /(\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package AMD64 RPM Enterprise Linux\]|\[Package AMD64 RPM\])/ - - # Fedora -- name: "Package Fedora 30" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/ -- name: "Package Fedora 29" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/ -- name: "Package Fedora 28" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package AMD64 RPM Fedora\]|\[Package AMD64 RPM\])/ - - # OpenSuSE -- name: "Package OpenSuSE 15.1" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package AMD64 RPM openSuSE\]|\[Package AMD64 RPM\])/ -- name: "Package OpenSuSE 15.0" - if: type != cron AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package AMD64 RPM openSuSE\]|\[Package AMD64 RPM\])/ - - - -# DEB and RPM template flows -- stage: &_RPM_TEMPLATE - name: "Build & Publish RPM package" - before_install: - - sudo apt-get install -y wget lxc lxc-templates - - source tests/installer/slack.sh - before_script: - - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}" - - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}" - script: - - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh - - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh - - sudo chmod -R 755 "/var/lib/lxc" - - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh - after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}" - - before_deploy: - - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found" 
- deploy: - # Beta packages deployment - - provider: packagecloud - repository: "${DEPLOY_REPO}" - username: "${PACKAGING_USER}" - token: "${PKG_CLOUD_TOKEN}" - dist: "${BUILD_STRING}" - local_dir: "${PACKAGES_DIRECTORY}" - skip_cleanup: true - on: - # Only deploy on ${USER}/netdata, master branch, when packages directory is created - repo: ${TRAVIS_REPO_SLUG} - branch: master - condition: -d "${PACKAGES_DIRECTORY}" - # Production release packages deployment - - provider: packagecloud - repository: "netdata" - username: "netdata" - token: "${PKG_CLOUD_TOKEN}" - dist: "${BUILD_STRING}" - local_dir: "${PACKAGES_DIRECTORY}" - skip_cleanup: true - on: - # Only deploy on ${USER}/netdata, master branch, when packages directory is created - repo: "netdata/netdata" - branch: "master" - condition: -d "${PACKAGES_DIRECTORY}" - after_deploy: - - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi; - - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi; - - - # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging -- stage: &_DEB_TEMPLATE - name: "Build & Publish DEB package" - before_install: - - sudo apt-get install -y wget lxc lxc-templates - - source tests/installer/slack.sh - before_script: - - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}" - - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}" - script: - - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh - - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh - - sudo chmod -R 755 "/var/lib/lxc" - - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh - after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}" - before_deploy: - - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found" - deploy: - # Beta packages deployment - - provider: packagecloud - repository: "${DEPLOY_REPO}" - username: "${PACKAGING_USER}" - token: "${PKG_CLOUD_TOKEN}" - dist: "${BUILD_STRING}" - local_dir: "${PACKAGES_DIRECTORY}" - skip_cleanup: true - on: - # Only deploy on ${USER}/netdata, master branch, when build-area directory is created - repo: ${TRAVIS_REPO_SLUG} - branch: master - condition: -d "${PACKAGES_DIRECTORY}" - # Production release packages deployment - - provider: packagecloud - repository: "netdata" - username: "netdata" - token: "${PKG_CLOUD_TOKEN}" - dist: "${BUILD_STRING}" - local_dir: "${PACKAGES_DIRECTORY}" - skip_cleanup: true - on: - # Only deploy on ${USER}/netdata, master branch, when build-area directory is created - repo: "netdata/netdata" - branch: master - condition: -d "${PACKAGES_DIRECTORY}" - after_deploy: - - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi; - - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi; + # Mandatory runs, we always want these executed + - name: Code quality, linting, syntax, code style + - name: Build process + - name: Artifacts 
validation + - name: Artifacts validation on bare OS, stable to current lifecycle checks + if: branch = master AND (type = pull_request OR type = cron) + + # Nightly operations + - name: Nightly operations + if: branch = master AND type = cron + - name: Nightly release + if: branch = master AND type = cron + + # Scheduled releases + - name: Packaging for release + if: branch = master AND type != pull_request AND type != cron + + - name: Publish for release + if: branch = master AND type != pull_request AND type != cron AND commit_message =~ /(\[netdata release candidate\]|\[netdata major release\]|\[netdata minor release\]|\[netdata patch release\])/ + + # Build DEB packages under special conditions + # Ubuntu + - name: "Package ubuntu/disco" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/ + - name: "Package ubuntu/cosmic" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/ + - name: "Package ubuntu/bionic" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/ + + # Debian + - name: "Package debian/buster" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/ + - name: "Package debian/stretch" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/ + - name: "Package debian/jessie" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/ + + # Build RPM packages under special conditions + # Enterprise linux (Covers CentOS, Redhat, Amazon linux) + - name: "Package Enterprise Linux 7" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Enterprise Linux\]|\[Package arm64 RPM\]|\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/ + - name: "Package Enterprise linux 6" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/ + + # Fedora + - name: "Package Fedora 30" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/ + - name: "Package Fedora 29" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/ + - name: "Package Fedora 28" + if: type != 
cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/ + + # OpenSuSE + - name: "Package OpenSuSE 15.1" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/ + - name: "Package OpenSuSE 15.0" + if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/ + + + + # DEB and RPM template flows + - stage: &_RPM_TEMPLATE + name: "Build & Publish RPM package" + before_install: + - sudo apt-get install -y wget lxc lxc-templates + - source tests/installer/slack.sh + before_script: + - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}" + - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh + - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh + - sudo chmod -R 755 "/var/lib/lxc" + - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh + git: + depth: false + after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}" + before_deploy: + - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found" + deploy: + - provider: packagecloud + repository: "${DEPLOY_REPO}" + username: "${PACKAGING_USER}" + token: "${PKG_CLOUD_TOKEN}" + dist: "${BUILD_STRING}" + local_dir: "${PACKAGES_DIRECTORY}" + skip_cleanup: true + on: + # Only deploy on ${USER}/netdata, master branch, when packages directory is created + repo: ${TRAVIS_REPO_SLUG} + branch: "master" + condition: -d "${PACKAGES_DIRECTORY}" + after_deploy: + - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi; + - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi; + + + + # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging + - stage: &_DEB_TEMPLATE + name: "Build & Publish DEB package" + before_install: + - sudo apt-get install -y wget lxc lxc-templates dh-make git-buildpackage build-essential libdistro-info-perl + - source tests/installer/slack.sh + before_script: + - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}" + - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - echo "Creating LXC environment for the build" && sudo -E 
.travis/package_management/create_lxc_for_build.sh + - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh + - sudo chown -R root:travis "/var/lib/lxc" + - sudo chmod -R 750 "/var/lib/lxc" + - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh + git: + depth: false + after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}" + before_deploy: + - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found" + deploy: + - provider: packagecloud + repository: "${DEPLOY_REPO}" + username: "${PACKAGING_USER}" + token: "${PKG_CLOUD_TOKEN}" + dist: "${BUILD_STRING}" + local_dir: "${PACKAGES_DIRECTORY}" + skip_cleanup: true + on: + # Only deploy on ${USER}/netdata, master branch, when build-area directory is created + repo: ${TRAVIS_REPO_SLUG} + branch: "master" + condition: -d "${PACKAGES_DIRECTORY}" + after_deploy: + - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi; + - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi; @@ -209,464 +192,447 @@ stages: # jobs: include: - # Do code quality, syntax checking and other pre-build activities - - stage: Code quality, linting, syntax, code style + # Do code quality, syntax checking and other pre-build activities + - stage: Code quality, linting, syntax, code style - name: Run shellchecking on BASH - script: shellcheck --format=gcc $(find . -name '*.sh.in' -not -iwholename '*.git*') + name: Run shellchecking on BASH + script: shellcheck --format=gcc $(find . -name '*.sh.in' -not -iwholename '*.git*') - # This falls under same stage defined earlier - - name: Run checksum checks on kickstart files - script: ./tests/installer/checksums.sh - env: LOCAL_ONLY="true" + # This falls under same stage defined earlier + - name: Run checksum checks on kickstart files + script: ./tests/installer/checksums.sh + env: LOCAL_ONLY="true" - # This falls under same stage defined earlier - - name: Web Dashboard pre-generated file consistency checks (dashboard.js) - script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js + # This falls under same stage defined earlier + - name: Web Dashboard pre-generated file consistency checks (dashboard.js) + script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js - # Ensure netdata code builds successfully - - stage: Build process + # Ensure netdata code builds successfully + - stage: Build process - name: Standard netdata build - script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto - env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' - after_failure: post_message "TRAVIS_MESSAGE" "<!here> standard netdata build is failing (Still dont know which one, will improve soon)" + name: Standard netdata build + script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto + env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' + after_failure: post_message "TRAVIS_MESSAGE" "<!here> standard netdata build is failing (Still dont know which one, will improve soon)" - 
- name: Docker container build process (alpine installation) - script: packaging/docker/build.sh - env: DEVEL="true" - after_failure: post_message "TRAVIS_MESSAGE" "Docker build process failed" + - name: Docker container build process (alpine installation) + script: packaging/docker/build.sh + env: DEVEL="true" + after_failure: post_message "TRAVIS_MESSAGE" "Docker build process failed" - - name: Run 'make dist' validation - before_script: mkdir /tmp/netdata-makedist-test - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make clean || echo "Nothing to clean" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean || echo "Nothing to distclean" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" /bin/bash -c "autoreconf -ivf && ./configure --prefix=/netdata_install/usr --sysconfdir=/netdata_install/etc --localstatedir=/netdata_install/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2" - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make dist - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" ls -ltr ./netdata-$(git describe).tar.gz || ls -ltr ./netdata-$(cat packaging/version | tr -d '\n').tar.gz - - .travis/run_install_with_dist_file.sh - - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean - git: - depth: false - after_script: rm -rf /tmp/netdata-makedist-test - after_failure: post_message "TRAVIS_MESSAGE" "'make dist' failed" + - name: Run 'make dist' validation + before_script: mkdir /tmp/netdata-makedist-test + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make clean || echo "Nothing to clean" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean || echo "Nothing to distclean" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" /bin/bash -c "autoreconf -ivf && ./configure --prefix=/netdata_install/usr --sysconfdir=/netdata_install/etc --localstatedir=/netdata_install/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2" + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make dist + - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" ls -ltr ./netdata-$(git describe).tar.gz || ls -ltr ./netdata-$(cat packaging/version | tr -d '\n').tar.gz + - .travis/run_install_with_dist_file.sh + - docker run -it -v "${PWD}:/netdata:rw" -v 
"/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean + git: + depth: false + after_script: rm -rf /tmp/netdata-makedist-test + after_failure: post_message "TRAVIS_MESSAGE" "'make dist' failed" - - stage: Artifacts validation + - stage: Artifacts validation - name: Unit Testing - script: - - fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto - - $HOME/netdata/usr/sbin/netdata -W unittest - env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' - after_failure: post_message "TRAVIS_MESSAGE" "Unit testing failed" - - - name: Build/install on ubuntu 14.04 (not containerized) - script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME - after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 14.04" + name: Unit Testing + script: + - fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto + - $HOME/netdata/usr/sbin/netdata -W unittest + env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1' + after_failure: post_message "TRAVIS_MESSAGE" "Unit testing failed" + + - name: Build/install on ubuntu 14.04 (not containerized) + script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME + after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 14.04" - - name: Build/Install for ubuntu 18.04 (not containerized) - script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME - after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 18.04" + - name: Build/Install for ubuntu 18.04 (not containerized) + script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME + after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 18.04" - - name: Run netdata lifecycle, on ubuntu 18.04 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats - after_failure: post_message "TRAVIS_MESSAGE" "Netdata lifecycle test script failed on ubuntu 18.04" + - name: Run netdata lifecycle, on ubuntu 18.04 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats + after_failure: post_message "TRAVIS_MESSAGE" "Netdata lifecycle test script failed on ubuntu 18.04" - - name: Run netdata lifecycle from stable to current, on CentOS 7 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on CentOS 7" - - - name: Build/install for CentOS 6 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos6" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp - after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 6" + - name: Run netdata lifecycle from stable to current, on CentOS 7 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on CentOS 7" + + - name: Build/install for CentOS 6 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos6" 
./netdata-installer.sh --dont-wait --dont-start-it --install /tmp + after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 6" - - name: Build/install for CentOS 7 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp - after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7" + - name: Build/install for CentOS 7 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp + after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7" - - stage: "Artifacts validation on bare OS, stable to current lifecycle checks" - - # Ubuntu runs - name: Run netdata lifecycle on Ubuntu 16.04 (xenial) - script: sudo -E tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 16.04" + - stage: "Artifacts validation on bare OS, stable to current lifecycle checks" + + # Ubuntu runs + name: Run netdata lifecycle on Ubuntu 16.04 (xenial) + script: sudo -E tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 16.04" - - name: Run netdata lifecycle, on Ubuntu 19.04 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "ubuntu:19.04" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 19.04" + - name: Run netdata lifecycle, on Ubuntu 19.04 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "ubuntu:19.04" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 19.04" - # Centos runs - - name: Run netdata lifecycle on CentOS 7 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "centos:7" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare CentOS 7" + # Centos runs + - name: Run netdata lifecycle on CentOS 7 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "centos:7" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare CentOS 7" - # Debian runs - - name: Run netdata lifecycle, on Debian 9 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "debian:stretch" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Debian 9 (stretch)" + # Debian runs + - name: Run netdata lifecycle, on Debian 9 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "debian:stretch" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Debian 9 (stretch)" - # openSuSE runs - - name: Run netdata lifecycle, on openSuSE 15.0 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.0" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.0" + # openSuSE runs + - name: Run netdata lifecycle, on openSuSE 15.0 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.0" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.0" - - name: Run netdata lifecycle, 
on openSuSE 15.1 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.1" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.1" + - name: Run netdata lifecycle, on openSuSE 15.1 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.1" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.1" - - name: Run netdata lifecycle, on openSuSE Tumbleweed - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/tumbleweed:latest" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/tumbleweed:latest" + - name: Run netdata lifecycle, on openSuSE Tumbleweed + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/tumbleweed:latest" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/tumbleweed:latest" - # Alpine runs - - name: Run netdata lifecycle, on Alpine linux - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "alpine" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Alpine" + # Alpine runs + - name: Run netdata lifecycle, on Alpine linux + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "alpine" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Alpine" - # Arch linux runs - - name: Run netdata lifecycle, on ArchLinux - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "archlinux/base:latest" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare archlinux/base:latest" + # Arch linux runs + - name: Run netdata lifecycle, on ArchLinux + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "archlinux/base:latest" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare archlinux/base:latest" - # Fedora runs - - name: Run netdata lifecycle, on Fedora 28 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:28" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 28" + # Fedora runs + - name: Run netdata lifecycle, on Fedora 28 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:28" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 28" - - name: Run netdata lifecycle, on Fedora 29 - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:29" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 29" + - name: Run netdata lifecycle, on Fedora 29 + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:29" tests/updater_checks.sh + after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 29" - - name: Run netdata lifecycle, on Fedora 30 (Containerized) - script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:30" tests/updater_checks.sh - after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 30" + - name: Run netdata lifecycle, on Fedora 30 (Containerized) + script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:30" tests/updater_checks.sh + 
after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 30" - - stage: Packaging for release + - stage: Packaging for release - name: Generate changelog and TAG the release (only on special commit msg) - before_script: post_message "TRAVIS_MESSAGE" "Packaging step for release initiated" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - .travis/generate_changelog_and_tag_release.sh - after_failure: post_message "TRAVIS_MESSAGE" "<!here> Packaging for release failed" - git: - depth: false + name: Generate changelog and TAG the release (only on special commit msg) + before_script: post_message "TRAVIS_MESSAGE" "Packaging step for release initiated" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - .travis/generate_changelog_and_tag_release.sh + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Packaging for release failed" + git: + depth: false - - name: Run labeler on github issues - script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA + - name: Run labeler on github issues + script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA - # ###### Packaging workflow section ###### - # References: - # https://us.images.linuxcontainers.org - # https://packagecloud.io/docs#install_repo + # ###### Packaging workflow section ###### + # References: + # https://us.images.linuxcontainers.org + # https://packagecloud.io/docs#install_repo - # Ubuntu distros build - # - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/disco" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="disco" BUILD_STRING="ubuntu/disco" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + # Ubuntu distros build + # + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/disco" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="disco" BUILD_STRING="ubuntu/disco" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/cosmic" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="cosmic" BUILD_STRING="ubuntu/cosmic" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/cosmic" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="cosmic" BUILD_STRING="ubuntu/cosmic" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/bionic" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package ubuntu/bionic" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package ubuntu/artful" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="artful" BUILD_STRING="ubuntu/artful" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - 
ALLOW_SOFT_FAILURE_HERE=true + # Debian distros build + - stage: + <<: *_DEB_TEMPLATE + stage: "Package debian/buster" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - # Debian distros build - - stage: - <<: *_DEB_TEMPLATE - stage: "Package debian/buster" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package debian/stretch" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package debian/stretch" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_DEB_TEMPLATE + stage: "Package debian/jessie" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="jessie" BUILD_STRING="debian/jessie" + - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package debian/jessie" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="jessie" BUILD_STRING="debian/jessie" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + # Enterprise linux builds (Centos, Redhat, Amazon linux (el/6)) + # + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Enterprise Linux 7" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7" + - PACKAGE_TYPE="rpm" REPO_TOOL="yum" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_DEB_TEMPLATE - stage: "Package debian/wheezy" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="wheezy" BUILD_STRING="debian/wheezy" - - PACKAGE_TYPE="deb" REPO_TOOL="apt-get" - - ALLOW_SOFT_FAILURE_HERE=true + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Enterprise linux 6" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="6" BUILD_STRING="el/6" + - PACKAGE_TYPE="rpm" REPO_TOOL="yum" + - ALLOW_SOFT_FAILURE_HERE=true + + # Fedora distros build + # + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Fedora 30" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="30" BUILD_STRING="fedora/30" + - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" + - ALLOW_SOFT_FAILURE_HERE=true - # Enterprise linux builds (Centos, Redhat, Amazon linux (el/6)) - # - - stage: - <<: *_RPM_TEMPLATE - stage: "Package Enterprise Linux 7" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7" - - PACKAGE_TYPE="rpm" REPO_TOOL="yum" - - ALLOW_SOFT_FAILURE_HERE=true + + + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Fedora 29" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="29" BUILD_STRING="fedora/29" + - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" + - ALLOW_SOFT_FAILURE_HERE=true - - stage: - <<: *_RPM_TEMPLATE - stage: "Package Enterprise linux 6" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="6" BUILD_STRING="el/6" - - PACKAGE_TYPE="rpm" REPO_TOOL="yum" - - ALLOW_SOFT_FAILURE_HERE=true - - - - # Fedora distros build - # - - stage: - <<: *_RPM_TEMPLATE - stage: "Package Fedora 30" - env: - - BUILDER_NAME="builder" 
BUILD_DISTRO="fedora" BUILD_RELEASE="30" BUILD_STRING="fedora/30" - - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" - - ALLOW_SOFT_FAILURE_HERE=true - - - - - stage: - <<: *_RPM_TEMPLATE - stage: "Package Fedora 29" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="29" BUILD_STRING="fedora/29" - - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" - - ALLOW_SOFT_FAILURE_HERE=true - - - - - stage: - <<: *_RPM_TEMPLATE - stage: "Package Fedora 28" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="28" BUILD_STRING="fedora/28" - - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" - - ALLOW_SOFT_FAILURE_HERE=true - - - - # Opensuse distros build - # - - stage: - <<: *_RPM_TEMPLATE - stage: "Package OpenSuSE 15.1" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.1" - - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" - - ALLOW_SOFT_FAILURE_HERE=true - - - - - stage: - <<: *_RPM_TEMPLATE - stage: "Package OpenSuSE 15.0" - env: - - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.0" - - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" - - ALLOW_SOFT_FAILURE_HERE=true - # ###### End of packaging workflow section ###### # - # ############################################### # - - - - # We only publish if a TAG has been set during packaging - - stage: Publish for release - - name: Build & Publish docker images - before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - packaging/docker/check_login.sh - - packaging/docker/build.sh - - packaging/docker/publish.sh - after_failure: post_message "TRAVIS_MESSAGE" "<!here> Docker image publishing failed" - git: - depth: false - env: ALLOW_SOFT_FAILURE_HERE=true - # We don't run on release candidates - if: tag !~ /(-rc)/ - - - name: Create release draft - before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs - - .travis/draft_release.sh - git: - depth: false - after_failure: post_message "TRAVIS_MESSAGE" "<!here> Draft release submission failed" - # We don't run on release candidates - if: tag !~ /(-rc)/ - - - - # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc) - - stage: Nightly operations - - name: Run coverity scan - # Just notify people that Nightly ops triggered, use the first step as a hook to do that - before_script: post_message "TRAVIS_MESSAGE" "Starting nightly operations" "${NOTIF_CHANNEL}" - script: ./coverity-install.sh && ./coverity-scan.sh || echo "Coverity failed :(" - - - name: Kickstart files integrity testing (extended) - script: ./tests/installer/checksums.sh - - - name: Run labeler on github issues - script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA - - # This is generating the changelog for nightly release and publish it - - name: Generate nightly changelog - before_script: post_message "TRAVIS_MESSAGE" "Starting changelog generation for nightlies" "${NOTIF_CHANNEL}" - script: ".travis/nightlies.sh" - after_failure: 
post_message "TRAVIS_MESSAGE" "<!here> Nightly changelog generation failed" - git: - depth: false - - - - # This is the nightly execution step - # - - stage: Nightly release - - name: Build & Publish docker images - before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images for nightlies" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - docker info - - packaging/docker/check_login.sh - - packaging/docker/build.sh - - packaging/docker/publish.sh - after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly docker image publish failed" - git: - depth: false - env: ALLOW_SOFT_FAILURE_HERE=true - - - name: Create nightly release artifacts, publish to GCS - before_script: post_message "TRAVIS_MESSAGE" "Starting artifacts generation for nightlies" "${NOTIF_CHANNEL}" - script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - .travis/create_artifacts.sh - after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly artifacts generation failed" - git: - depth: false - before_deploy: - echo "Preparing creds under ${TRAVIS_REPO_SLUG}"; - if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - else - echo "Beta deployment stage in progress"; - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - fi; - deploy: - # Beta storage, used for testing purposes - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-dev-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: ${TRAVIS_REPO_SLUG} - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata" - - # Production storage - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: netdata/netdata - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata" - after_deploy: rm -f .travis/gcs-credentials.json + - stage: + <<: *_RPM_TEMPLATE + stage: "Package Fedora 28" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="28" BUILD_STRING="fedora/28" + - PACKAGE_TYPE="rpm" REPO_TOOL="dnf" + - ALLOW_SOFT_FAILURE_HERE=true + + + + # Opensuse distros build + # + - stage: + <<: *_RPM_TEMPLATE + stage: "Package OpenSuSE 15.1" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.1" + - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" + - ALLOW_SOFT_FAILURE_HERE=true + + + + - stage: + <<: *_RPM_TEMPLATE + stage: "Package OpenSuSE 15.0" + env: + - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.0" + - PACKAGE_TYPE="rpm" REPO_TOOL="zypper" + - ALLOW_SOFT_FAILURE_HERE=true + # ###### End of packaging workflow section ###### # + # 
############################################### # + + + + # We only publish if a TAG has been set during packaging + - stage: Publish for release + + name: Build & Publish docker images + before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - packaging/docker/check_login.sh + - echo "Switching to latest master branch, to pick up tagging if any" && git checkout master && git pull + - packaging/docker/build.sh + - packaging/docker/publish.sh + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Docker image publishing failed" + git: + depth: false + env: ALLOW_SOFT_FAILURE_HERE=true + # We don't run on release candidates + if: tag !~ /(-rc)/ + + - name: Create release draft + before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs + - .travis/draft_release.sh + git: + depth: false + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Draft release submission failed" + # We don't run on release candidates + if: tag !~ /(-rc)/ + + + + # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc) + - stage: Nightly operations + + name: Run coverity scan + # Just notify people that Nightly ops triggered, use the first step as a hook to do that + before_script: post_message "TRAVIS_MESSAGE" "Starting nightly operations" "${NOTIF_CHANNEL}" + script: ./coverity-install.sh && ./coverity-scan.sh || echo "Coverity failed :(" + + - name: Kickstart files integrity testing (extended) + script: ./tests/installer/checksums.sh + + - name: Run labeler on github issues + script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA + + # This is generating the changelog for nightly release and publish it + - name: Generate nightly changelog + before_script: post_message "TRAVIS_MESSAGE" "Starting changelog generation for nightlies" "${NOTIF_CHANNEL}" + script: + - ".travis/nightlies.sh" + - ".travis/check_changelog_last_modification.sh" + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly changelog generation failed" + git: + depth: false + + + + # This is the nightly execution step + # + - stage: Nightly release + + name: Build & Publish docker images + before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images for nightlies" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" && git describe + - echo "packaging/version:" && cat packaging/version + - docker info + - packaging/docker/check_login.sh + - packaging/docker/build.sh + - packaging/docker/publish.sh + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly docker image publish failed" + git: + depth: false + env: ALLOW_SOFT_FAILURE_HERE=true + + - name: Create nightly release artifacts, publish to GCS + before_script: post_message "TRAVIS_MESSAGE" "Starting artifacts generation for nightlies" "${NOTIF_CHANNEL}" + script: + - echo "GIT Branch:" && git branch + - echo "Last commit:" && git log -1 + - echo "GIT Describe:" 
&& git describe + - echo "packaging/version:" && cat packaging/version + - .travis/create_artifacts.sh + after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly artifacts generation failed" + git: + depth: false + before_deploy: + echo "Preparing creds under ${TRAVIS_REPO_SLUG}"; + if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then + openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; + else + echo "Beta deployment stage in progress"; + openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; + fi; + deploy: + # Beta storage, used for testing purposes + - provider: gcs + edge: + branch: gcs-ng + project_id: netdata-storage + credentials: .travis/gcs-credentials.json + bucket: "netdata-dev-nightlies" + skip_cleanup: true + local_dir: "artifacts" + on: + # Only deploy on netdata/netdata, master branch, when artifacts directory is created + repo: ${TRAVIS_REPO_SLUG} + branch: master + condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata" + + # Production storage + - provider: gcs + edge: + branch: gcs-ng + project_id: netdata-storage + credentials: .travis/gcs-credentials.json + bucket: "netdata-nightlies" + skip_cleanup: true + local_dir: "artifacts" + on: + # Only deploy on netdata/netdata, master branch, when artifacts directory is created + repo: netdata/netdata + branch: master + condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata" + after_deploy: rm -f .travis/gcs-credentials.json diff --git a/.travis/README.md b/.travis/README.md index b7b61ecb..3b314fa1 100644 --- a/.travis/README.md +++ b/.travis/README.md @@ -138,6 +138,6 @@ We plan to support amd64, x86 and arm64 architectures. As of June 2019 only amd6 The Package deployment can be triggered manually by executing an empty commit with the following message pattern: `[Package PACKAGE_TYPE PACKAGE_ARCH] DESCRIBE_THE_REASONING_HERE`. Travis Yaml configuration allows the user to combine package type and architecture as necessary to regenerate the current stable release (For example tag v1.15.0 as of 4th of May 2019) -Sample patterns to trigger building of packages for all AMD64 supported architecture: -- '[Package AMD64 RPM]': Build & publish all amd64 available RPM packages -- '[Package AMD64 DEB]': Build & publish all amd64 available DEB packages +Sample patterns to trigger building of packages for all amd64 supported architecture: +- '[Package amd64 RPM]': Build & publish all amd64 available RPM packages +- '[Package amd64 DEB]': Build & publish all amd64 available DEB packages diff --git a/.travis/check_changelog_last_modification.sh b/.travis/check_changelog_last_modification.sh new file mode 100755 index 00000000..2665c062 --- /dev/null +++ b/.travis/check_changelog_last_modification.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -e + +LAST_MODIFICATION="$(git log -1 --pretty="format:%at" CHANGELOG.md)" +CURRENT_TIME="$(date +"%s")" +TWO_DAYS_IN_SECONDS=172800 + +DIFF=$((CURRENT_TIME - LAST_MODIFICATION)) + +echo "Checking CHANGELOG.md last modification time on GIT.." +echo "CHANGELOG.md timestamp: ${LAST_MODIFICATION}" +echo "Current timestamp: ${CURRENT_TIME}" +echo "Diff: ${DIFF}" + +if [ ${DIFF} -gt ${TWO_DAYS_IN_SECONDS} ]; then + echo "CHANGELOG.md is more than two days old!" 
+ post_message "TRAVIS_MESSAGE" "Hi <!here>, CHANGELOG.md was found more than two days old (Diff: ${DIFF} seconds)" "${NOTIF_CHANNEL}" +else + echo "CHANGELOG.md is less than two days old, fine" +fi diff --git a/.travis/generate_changelog_for_nightlies.sh b/.travis/generate_changelog_for_nightlies.sh index 68491fa9..b9086288 100755 --- a/.travis/generate_changelog_for_nightlies.sh +++ b/.travis/generate_changelog_for_nightlies.sh @@ -49,6 +49,7 @@ docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest --token "${GITHUB_TOKEN}" \ --since-tag "v1.10.0" \ --unreleased-label "**Next release**" \ + --no-issues \ --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \ --no-compare-link ${OPTS} diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh index 188b37da..00246104 100755 --- a/.travis/nightlies.sh +++ b/.travis/nightlies.sh @@ -38,6 +38,12 @@ if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then fi echo "--- Running Changelog generation ---" -.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || echo "Changelog generation has failed, this is a soft error, process continues" +NIGHTLIES_CHANGELOG_FAILED=0 +.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || NIGHTLIES_CHANGELOG_FAILED=1 + +if [ ${NIGHTLIES_CHANGELOG_FAILED} -eq 1 ]; then + echo "Changelog generation has failed, this is a soft error, process continues" + post_message "TRAVIS_MESSAGE" "Changelog generation job for nightlies failed, possibly due to github issues" "${NOTIF_CHANNEL}" +fi exit "${FAIL}" diff --git a/.travis/package_management/build.sh b/.travis/package_management/build.sh new file mode 100644 index 00000000..beb522a3 --- /dev/null +++ b/.travis/package_management/build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +UNPACKAGED_NETDATA_PATH="$1" +LATEST_RELEASE_VERSION="$2" + +if [ -z "${LATEST_RELEASE_VERSION}" ]; then + echo "Parameter 'LATEST_RELEASE_VERSION' not defined" + exit 1 +fi + +if [ -z "${UNPACKAGED_NETDATA_PATH}" ]; then + echo "Parameter 'UNPACKAGED_NETDATA_PATH' not defined" + exit 1 +fi + +echo "Building DEB packages since release ${LATEST_RELEASE_VERSION}" + +echo "Entering ${UNPACKAGED_NETDATA_PATH}" +cd "${UNPACKAGED_NETDATA_PATH}" + +echo "Linking debian -> contrib/debian" +ln -sf contrib/debian debian + +echo "Executing dpkg-buildpackage" +if dpkg-buildpackage --version 2> /dev/null | grep -q "1.18"; then + dpkg-buildpackage --post-clean --pre-clean --build=binary +else + dpkg-buildpackage -b +fi + +echo "DEB build script completed!" diff --git a/.travis/package_management/common.py b/.travis/package_management/common.py index 6cf59293..6e7a2602 100755 --- a/.travis/package_management/common.py +++ b/.travis/package_management/common.py @@ -6,6 +6,22 @@ import lxc import subprocess +import os +import sys + +def fetch_version(orig_build_version): + tag = None + friendly_version = "" + + # TODO: Checksum validations + if str(orig_build_version).count(".latest") == 1: + version_list=str(orig_build_version).replace('v', '').split('.') + friendly_version='.'.join(version_list[0:3]) + "." 
+ version_list[3] + else: + friendly_version = orig_build_version.replace('v', '') + tag = friendly_version # Go to stable tag + print("Version set to %s from %s" % (friendly_version, orig_build_version)) + + return friendly_version, tag def replace_tag(tag_name, spec, new_tag_content): print("Fixing tag %s in %s" % (tag_name, spec)) @@ -44,3 +60,103 @@ def run_command_in_host(cmd): print('Output: ' + o.decode('ascii')) print('Error: ' + e.decode('ascii')) print('code: ' + str(proc.returncode)) + +def prepare_repo(container): + if str(os.environ["REPO_TOOL"]).count("zypper") == 1: + run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"]) + run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"]) + + elif str(os.environ["REPO_TOOL"]).count("yum") == 1: + run_command(container, [os.environ["REPO_TOOL"], "clean", "all"]) + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + + if os.environ["BUILD_STRING"].count("el/7") == 1 and os.environ["BUILD_ARCH"].count("i386") == 1: + print ("Skipping epel-release install for %s-%s" % (os.environ["BUILD_STRING"], os.environ["BUILD_ARCH"])) + else: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"]) + + elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + else: + run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) + + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"]) + +def install_common_dependendencies(container): + if str(os.environ["REPO_TOOL"]).count("zypper") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c"]) + + elif str(os.environ["REPO_TOOL"]).count("yum") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + + elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "g++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libipmimonitoring-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libjson-c-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) + run_command(container, 
[os.environ["REPO_TOOL"], "install", "-y", "libsnappy-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotobuf-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotoc-dev"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + if os.environ["BUILD_STRING"].count("debian/jessie") == 1: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy"]) + else: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) + + if os.environ["BUILD_STRING"].count("el/6") <= 0: + run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autogen"]) + +def prepare_version_source(dest_archive, pkg_friendly_version, tag=None): + print(".0 Preparing local implementation tarball for version %s" % pkg_friendly_version) + tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive + + if tag is not None: + print(".1 Checking out tag %s" % tag) + run_command_in_host(['git', 'fetch', '--all']) + + # TODO: Keep in mind that tricky 'v' there, needs to be removed once we clear our versioning scheme + run_command_in_host(['git', 'checkout', 'v%s' % pkg_friendly_version]) + + print(".2 Tagging the code with version: %s" % pkg_friendly_version) + run_command_in_host(['git', 'tag', '-a', pkg_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]]) + + print(".3 Run autoreconf -ivf") + run_command_in_host(['autoreconf', '-ivf']) + + print(".4 Run configure") + run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata']) + + print(".5 Run make dist") + run_command_in_host(['make', 'dist']) + + print(".6 Copy generated tarball to desired path") + if os.path.exists('netdata-%s.tar.gz' % pkg_friendly_version): + run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % pkg_friendly_version, tar_file]) + + print(".7 Fixing permissions on tarball") + run_command_in_host(['sudo', 'chmod', '777', tar_file]) + else: + print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % pkg_friendly_version) + sys.exit(1) diff --git a/.travis/package_management/configure_deb_lxc_environment.py b/.travis/package_management/configure_deb_lxc_environment.py index 58999ad3..12328dde 100755 --- a/.travis/package_management/configure_deb_lxc_environment.py +++ b/.travis/package_management/configure_deb_lxc_environment.py @@ -39,27 +39,57 @@ print("Waiting for container connectivity to start configuration sequence") if not container.get_ips(timeout=30): raise Exception("Timeout while waiting for container") +build_path = "/home/%s" % os.environ['BUILDER_NAME'] + # Run the required activities now # 1. Create the builder user print("1. 
Adding user %s" % os.environ['BUILDER_NAME']) common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) # Fetch package dependencies for the build -print("2. Installing package dependencies within LXC container") -common.run_command(container, ["apt-get", "update", "-y"]) -common.run_command(container, ["apt-get", "install", "-y", "sudo"]) -common.run_command(container, ["apt-get", "install", "-y", "wget"]) -common.run_command(container, ["apt-get", "install", "-y", "bash"]) -common.run_command(container, ["wget", "-T", "15", "-O", "~/.install-required-packages.sh", "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "~/.install-required-packages.sh", "netdata", "--dont-wait", "--non-interactive"]) - -# Download the source -dest_archive="/home/%s/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'],os.environ['BUILD_VERSION']) -release_url="https://github.com/netdata/netdata/releases/download/%s/netdata-%s.tar.gz" % (os.environ['BUILD_VERSION'], os.environ['BUILD_VERSION']) -print("3. Fetch netdata source (%s -> %s)" % (release_url, dest_archive)) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "wget", "-T", "15", "--output-document=" + dest_archive, release_url]) - -print("4. Extracting directory contents to /home " + os.environ['BUILDER_NAME']) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", dest_archive, "-C", "/home/" + os.environ['BUILDER_NAME']]) +print("2. Preparing repo on LXC container") +common.prepare_repo(container) + +print("2.1 Install .DEB build support packages") +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dpkg-dev"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libdistro-info-perl"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-make"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-systemd"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-autoreconf"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "git-buildpackage"]) + +print("2.2 Add more dependencies") +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter-acct-dev"]) +common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) + +print ("3.1 Run install-required-packages scriptlet") +common.run_command(container, ["wget", "-T", "15", "-O", "%s/.install-required-packages.sh" % build_path, "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) +common.run_command(container, ["bash", "%s/.install-required-packages.sh" % build_path, "netdata", "--dont-wait", "--non-interactive"]) + +print("3.2 Installing package dependencies within LXC container") +common.install_common_dependendencies(container) + +friendly_version="" +dest_archive="" +download_url="" +tag = None +friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) + +print("5. I will be building version '%s' of netdata." 
% os.environ['BUILD_VERSION']) +dest_archive="%s/netdata-%s.tar.gz" % (build_path, friendly_version) + +if str(os.environ["BUILD_STRING"]).count("debian/jessie") == 1: + print("5.1 We are building for Jessie, adjusting control file") + common.run_command_in_host(['sudo', 'rm', 'contrib/debian/control']) + common.run_command_in_host(['sudo', 'cp', 'contrib/debian/control.jessie', 'contrib/debian/control']) + +common.prepare_version_source(dest_archive, friendly_version, tag=tag) + +print("6. Installing build.sh script to build path") +common.run_command_in_host(['sudo', 'cp', '.travis/package_management/build.sh', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) +common.run_command_in_host(['sudo', 'chmod', '777', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) +common.run_command_in_host(['sudo', 'ln', '-sf', 'contrib/debian', 'debian']) print("Done!") diff --git a/.travis/package_management/configure_rpm_lxc_environment.py b/.travis/package_management/configure_rpm_lxc_environment.py index 644e027b..79d34608 100755 --- a/.travis/package_management/configure_rpm_lxc_environment.py +++ b/.travis/package_management/configure_rpm_lxc_environment.py @@ -46,30 +46,15 @@ print("1. Adding user %s" % os.environ['BUILDER_NAME']) common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) # Fetch package dependencies for the build -print("2. Installing package dependencies within LXC container") -if str(os.environ["REPO_TOOL"]).count("zypper") == 1: - common.run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"]) - common.run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"]) - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"]) - -elif str(os.environ["REPO_TOOL"]).count("yum") == 1: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - common.run_command(container, [os.environ["REPO_TOOL"], "clean", "all"]) - common.run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"]) -else: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - common.run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) +print("2.1 Preparing repo on LXC container") +common.prepare_repo(container) + +common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) +common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"]) # Exceptional cases, not available everywhere # - +print("2.2 Installing uncommon dependencies and preparing LXC environment") # Not on Centos-7 if os.environ["BUILD_STRING"].count("el/7") <= 0: common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter_acct-devel"]) @@ -78,8 
+63,8 @@ if os.environ["BUILD_STRING"].count("el/7") <= 0: if os.environ["BUILD_STRING"].count("el/6") <= 0: common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autoconf-archive"]) -common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"]) +print("2.3 Installing common dependencies") +common.install_common_dependendencies(container) print("3. Setting up macros") common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "/bin/echo", "'%_topdir %(echo /home/" + os.environ['BUILDER_NAME'] + ")/rpmbuild' > /home/" + os.environ['BUILDER_NAME'] + "/.rpmmacros"]) @@ -97,63 +82,21 @@ rpm_friendly_version="" dest_archive="" download_url="" spec_file="/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME'] +tag = None +rpm_friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) -# TODO: Checksum validations -if str(os.environ['BUILD_VERSION']).count(".latest") == 1: - version_list=str(os.environ['BUILD_VERSION']).replace('v', '').split('.') - rpm_friendly_version='.'.join(version_list[0:3]) + "." + version_list[3] - - print("Building latest nightly version of netdata..(%s)" % os.environ['BUILD_VERSION']) - dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version) - - print("5. Preparing local latest implementation tarball for version %s" % rpm_friendly_version) - tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive - - print("5.1 Tagging the code with latest version: %s" % rpm_friendly_version) - common.run_command_in_host(['git', 'tag', '-a', rpm_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]]) - - print("5.2 Run autoreconf -ivf") - common.run_command_in_host(['autoreconf', '-ivf']) - - print("5.3 Run configure") - common.run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata']) - - print("5.4 Run make dist") - common.run_command_in_host(['make', 'dist']) - - print("5.5 Copy generated tarbal to desired path") - if os.path.exists('netdata-%s.tar.gz' % rpm_friendly_version): - common.run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % rpm_friendly_version, tar_file]) - - print("5.6 Fixing permissions on tarball") - common.run_command_in_host(['sudo', 'chmod', '777', tar_file]) - else: - print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % rpm_friendly_version) - sys.exit(1) - - # Extract the spec file in place - print("6. Extract spec file from the source") - common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - - print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file)) - common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) -else: - rpm_friendly_version = os.environ['BUILD_VERSION'] - - print("Building latest stable version of netdata.. 
(%s)" % os.environ['BUILD_VERSION']) - dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'],os.environ['BUILD_VERSION']) - download_url="https://github.com/netdata/netdata/releases/download/%s/netdata-%s.tar.gz" % (os.environ['BUILD_VERSION'], os.environ['BUILD_VERSION']) +print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION']) +dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version) - print("5. Fetch netdata source into the repo structure(%s -> %s)" % (download_url, dest_archive)) - tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version) - common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "wget", "-T", "15", "--output-document=" + dest_archive, download_url]) +common.prepare_version_source(dest_archive, rpm_friendly_version, tag=tag) - print("6.Extract spec file from the source") - common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "--to-command=cat > %s" % spec_file, "-xvf", dest_archive, "netdata-%s/netdata.spec" % os.environ['BUILD_VERSION']]) +# Extract the spec file in place +print("6. Extract spec file from the source") +common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) +common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - print("7. Temporary hack: Adjust version string on the spec file (%s) to %s and Source0 to %s" % (os.environ['LXC_CONTAINER_ROOT'] + spec_file, rpm_friendly_version, download_url)) - common.replace_tag("Version", os.environ['LXC_CONTAINER_ROOT'] + spec_file, rpm_friendly_version) - common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) +print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file)) +common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) print('Done!') diff --git a/.travis/package_management/create_lxc_for_build.sh b/.travis/package_management/create_lxc_for_build.sh index ae855a74..d733687a 100755 --- a/.travis/package_management/create_lxc_for_build.sh +++ b/.travis/package_management/create_lxc_for_build.sh @@ -83,7 +83,7 @@ case "${BUILD_ARCH}" in .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" ;; "i386"|"amd64"|"arm64") - # AMD64 or i386 + # amd64 or i386 echo "Creating LXC Container for ${BUILD_ARCH}.." export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}" export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" diff --git a/.travis/package_management/functions.sh b/.travis/package_management/functions.sh index 9a467ffe..0c4425fa 100644 --- a/.travis/package_management/functions.sh +++ b/.travis/package_management/functions.sh @@ -10,7 +10,7 @@ set -e function detect_arch_from_commit { case "${TRAVIS_COMMIT_MESSAGE}" in - "[Package AMD64"*) + "[Package amd64"*) export BUILD_ARCH="amd64" ;; "[Package i386"*) @@ -24,7 +24,7 @@ function detect_arch_from_commit { ;; *) - echo "Unknown build architecture '${BUILD_ARCH}' provided" + echo "Unknown build architecture in '${TRAVIS_COMMIT_MESSAGE}'. 
No BUILD_ARCH can be provided" exit 1 ;; esac diff --git a/.travis/package_management/prepare_packages.sh b/.travis/package_management/prepare_packages.sh index 1fb26a95..12ed07cc 100755 --- a/.travis/package_management/prepare_packages.sh +++ b/.travis/package_management/prepare_packages.sh @@ -27,29 +27,36 @@ for d in ${CREATED_CONTAINERS[@]}; do # Pick up any RPMS from builder RPM_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}/rpmbuild" - echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS" - - if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then - echo "Copying any RPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/RPMS" - [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}" - fi - - if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then - echo "Copying any SRPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/SRPMS" - [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}" + if [ -d "${RPM_BUILD_PATH}" ]; then + echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS" + + if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then + echo "Copying any RPMS in '${RPM_BUILD_PATH}', copying over the following:" + ls -ltrR "${RPM_BUILD_PATH}/RPMS" + [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}" + fi + + if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then + echo "Copying any SRPMS in '${RPM_BUILD_PATH}', copying over the following:" + ls -ltrR "${RPM_BUILD_PATH}/SRPMS" + [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}" + [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}" + fi + else + DEB_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}" + echo "Checking folder ${DEB_BUILD_PATH} for DEB packages" + if [ -d "${DEB_BUILD_PATH}" ]; then + cp "${DEB_BUILD_PATH}"/netdata*.ddeb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .ddeb files" + cp "${DEB_BUILD_PATH}"/netdata*.deb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .deb files" + cp "${DEB_BUILD_PATH}"/netdata*.buildinfo "${PACKAGES_DIRECTORY}" || echo "Could not copy any .buildinfo files" + cp "${DEB_BUILD_PATH}"/netdata*.changes "${PACKAGES_DIRECTORY}" || echo "Could not copy any .changes files" + else + echo "Folder ${DEB_BUILD_PATH} does not exist or is not a directory, nothing to do for package preparation" + fi fi - - # Pick up any DEBs from builder - DEB_BUILD_PATH="${d}/home/${BUILDER_NAME}/build-area" - echo "Checking folder ${DEB_BUILD_PATH} for DEB packages" - #TODO: During debian clean up we 'll fill this up - done chmod -R 777 "${PACKAGES_DIRECTORY}" diff --git 
a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py index 3040bdd6..a0235a73 100755 --- a/.travis/package_management/trigger_deb_lxc_build.py +++ b/.travis/package_management/trigger_deb_lxc_build.py @@ -37,15 +37,37 @@ if not container.running or not container.state == "RUNNING": if not container.get_ips(timeout=30): raise Exception("Timeout while waiting for container") +build_path = "/home/%s" % os.environ['BUILDER_NAME'] + print("Setting up EMAIL and DEBFULLNAME variables required by the build tools") os.environ["EMAIL"] = "bot@netdata.cloud" os.environ["DEBFULLNAME"] = "Netdata builder" # Run the build process on the container -print("Starting DEB build process, running dh-make") -new_version = os.environ["BUILD_VERSION"].replace('v', '') +new_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) +print("Starting DEB build process for version %s" % new_version) + +netdata_tarball = "%s/netdata-%s.tar.gz" % (build_path, new_version) +unpacked_netdata = netdata_tarball.replace(".tar.gz", "") + +print("Extracting tarball %s" % netdata_tarball) +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", netdata_tarball, "-C", build_path]) + +print("Fixing changelog tags") +changelog_in_host = "contrib/debian/changelog" +common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_VERSION/%s-1/g' % os.environ["LATEST_RELEASE_VERSION"].replace("v", ""), changelog_in_host]) +common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_DATE/%s/g' % os.environ["LATEST_RELEASE_DATE"], changelog_in_host]) + +print("Executing gbp dch command..") +common.run_command_in_host(['gbp', 'dch', '--release', '--ignore-branch', '--spawn-editor=snapshot', '--since=%s' % os.environ["LATEST_RELEASE_VERSION"], '--new-version=%s' % new_version]) + +print("Copying over changelog to the destination machine") +common.run_command_in_host(['sudo', 'cp', 'debian/changelog', "%s/%s/netdata-%s/contrib/debian/" % (os.environ['LXC_CONTAINER_ROOT'], build_path, new_version)]) + +print("Running debian build script since %s" % os.environ["LATEST_RELEASE_VERSION"]) +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "%s/build.sh" % build_path, unpacked_netdata, new_version]) -print("Building the package") -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "dpkg-buildpackage", "--host-arch", "amd64", "--target-arch", "amd64", "--post-clean", "--pre-clean", "--build=binary", "--release-by=\"Netdata Builder\"", "--build-by=\"Netdata Builder\""]) +print("Listing contents on build path") +common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltr", build_path]) print('Done!') diff --git a/.travis/package_management/yank_stale_rpm.sh b/.travis/package_management/yank_stale_rpm.sh index 5cf93866..3f766971 100755 --- a/.travis/package_management/yank_stale_rpm.sh +++ b/.travis/package_management/yank_stale_rpm.sh @@ -21,7 +21,7 @@ fi PACKAGES_DIR="$1" DISTRO="$2" -PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep '\.rpm')" +PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep -e '\.rpm' -e '\.deb' -e '\.ddeb' )" if [ ! -d "${PACKAGES_DIR}" ] || [ -z "${PACKAGES_LIST}" ]; then echo "Folder ${PACKAGES_DIR} does not seem to be a valid directory or is empty. 
No packages to check for yanking" diff --git a/CHANGELOG.md b/CHANGELOG.md index ed154251..d66fc666 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,126 @@ # Changelog +## [v1.16.1](https://github.com/netdata/netdata/tree/v1.16.1) (2019-07-31) + +**Fixed bugs:** + +- /etc/netdata/.environment gets updated constantly with auto-updates [\#6550](https://github.com/netdata/netdata/issues/6550) +- pluginsd python.d slowly eating more and more CPU due to smartd collection [\#6532](https://github.com/netdata/netdata/issues/6532) +- Special characters in configuration files can break the UI [\#6531](https://github.com/netdata/netdata/issues/6531) +- Plugin httpcheck heavily changes the WebUI [\#6530](https://github.com/netdata/netdata/issues/6530) +- Valid let's encrypt certifcate considerated as invalid by streaming client [\#6529](https://github.com/netdata/netdata/issues/6529) +- Can't start netdata in Amazon Linux [\#6522](https://github.com/netdata/netdata/issues/6522) +- Missing file when install the netdata [\#6519](https://github.com/netdata/netdata/issues/6519) +- netdata tengine invalid response length [\#6490](https://github.com/netdata/netdata/issues/6490) +- Snappy library is not detected correctly in all Linux distributions [\#6478](https://github.com/netdata/netdata/issues/6478) +- python sensors collector: sensors chips filtering doesnt work [\#6462](https://github.com/netdata/netdata/issues/6462) +- Streaming not working with ^SSL option in bind to = [\#6457](https://github.com/netdata/netdata/issues/6457) +- fix CRC error handling in dbengine [\#6451](https://github.com/netdata/netdata/issues/6451) +- python.d ERROR: unbound\[local\] : \[Errno 57\] Socket is not connected FreeBSD [\#6434](https://github.com/netdata/netdata/issues/6434) +- Issue with rethinkdbs plugin [\#6429](https://github.com/netdata/netdata/issues/6429) +- Double free or corruption \(again\) [\#6412](https://github.com/netdata/netdata/issues/6412) +- rpm version should be 1.16.0 not v1.16.0 \(extra "v"\) [\#6409](https://github.com/netdata/netdata/issues/6409) +- Netdata does not detect pkg-config under automated install [\#6405](https://github.com/netdata/netdata/issues/6405) +- No LVM disk space usage on CentOS [\#6401](https://github.com/netdata/netdata/issues/6401) +- Cannot see charts in an imported snapshot [\#6384](https://github.com/netdata/netdata/issues/6384) +- netdata does not send notifications for alarms which fail with \(errno 12, Out of memory\) [\#6335](https://github.com/netdata/netdata/issues/6335) +- netdata/packaging: Fine tune documentation regarding package dependencies per distribution [\#6300](https://github.com/netdata/netdata/issues/6300) +- netdata/web: layout getting messed up on certain dimensions when resizing the window [\#6269](https://github.com/netdata/netdata/issues/6269) +- charts.d kills process twice [\#6190](https://github.com/netdata/netdata/issues/6190) +- MacOS Path Issues [\#6165](https://github.com/netdata/netdata/issues/6165) +- Memory leak in power supply module [\#6132](https://github.com/netdata/netdata/issues/6132) +- Question for the Netdata-Dashboard [\#6037](https://github.com/netdata/netdata/issues/6037) +- Fatal errors sometimes fail to halt the netdata daemon [\#5896](https://github.com/netdata/netdata/issues/5896) +- my-netdata menu dynamic sizing [\#5812](https://github.com/netdata/netdata/issues/5812) +- \[Security\] Docker socket exported writable; better use docker socket proxy [\#5680](https://github.com/netdata/netdata/issues/5680) +- Cant 
see user name when run netdata in docker [\#5585](https://github.com/netdata/netdata/issues/5585) +- No netdata-updater.sh in cron.daily after installing with kickstart-static64.sh [\#4122](https://github.com/netdata/netdata/issues/4122) +- If trailing slash is not included dashboard.js fails to load for slave dashboard for master [\#3820](https://github.com/netdata/netdata/issues/3820) +- Alarm "system.softnet\_stat" is very strict. [\#1076](https://github.com/netdata/netdata/issues/1076) +- netdata/packaging: Move tarball checksum information into lib dir of netdata [\#6555](https://github.com/netdata/netdata/pull/6555) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging: Adopt netdata-updater to run properly for static64 installations. [\#6520](https://github.com/netdata/netdata/pull/6520) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging: Do not deliver edit-config as part of the distribution tarball [\#6507](https://github.com/netdata/netdata/pull/6507) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Stop anonymous stats from writing log in /tmp [\#6491](https://github.com/netdata/netdata/pull/6491) ([cakrit](https://github.com/cakrit)) +- netdata/packaging: Fix RPM packaging workflow issues, plus draft changes for .DEB packaging [\#6415](https://github.com/netdata/netdata/pull/6415) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) + +**Closed issues:** + +- Nodes Views redirects to the agent instead of the nodes view upon login [\#6542](https://github.com/netdata/netdata/issues/6542) +- adding node.js for building web server [\#6521](https://github.com/netdata/netdata/issues/6521) +- Support for ScaleIO/VxFlexOS v3 [\#6475](https://github.com/netdata/netdata/issues/6475) +- Disable HTML email for alarms [\#6458](https://github.com/netdata/netdata/issues/6458) +- netdata/packaging: Make go.d plugin an independent module [\#6367](https://github.com/netdata/netdata/issues/6367) +- Global option for enabling charts with zero metrics [\#6315](https://github.com/netdata/netdata/issues/6315) +- Netdata variable granularity support in netdata daemon - with basic unit testing [\#6255](https://github.com/netdata/netdata/issues/6255) +- Netdata variable granularity support in dbengine [\#6254](https://github.com/netdata/netdata/issues/6254) +- Alarms on system boot [\#6114](https://github.com/netdata/netdata/issues/6114) +- \[Binary releases\] Create a script that will containerise the DEB build process [\#5968](https://github.com/netdata/netdata/issues/5968) +- health\_alarm\_notify Being Overwritten [\#5669](https://github.com/netdata/netdata/issues/5669) +- HTML sanitizer for dashboard\_info.js [\#5652](https://github.com/netdata/netdata/issues/5652) +- badge do not suppport other language [\#3117](https://github.com/netdata/netdata/issues/3117) +- Feature Request: Linux zram device statistics. [\#2578](https://github.com/netdata/netdata/issues/2578) +- Provide an include configuration mechanism [\#2360](https://github.com/netdata/netdata/issues/2360) +- Authentication support [\#70](https://github.com/netdata/netdata/issues/70) + +**Merged pull requests:** + +- Handle disconnected sockets in unbound collector. 
[\#6561](https://github.com/netdata/netdata/pull/6561) ([Ferroin](https://github.com/Ferroin)) +- netdata/packaging: Notify us when CHANGELOG.md gets too old [\#6556](https://github.com/netdata/netdata/pull/6556) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add configurable default locations for trusted CA certificates [\#6549](https://github.com/netdata/netdata/pull/6549) ([thiagoftsm](https://github.com/thiagoftsm)) +- smartd\_log: use `del\_dimension` instead of `hide\_dimension` to delete inactive disks [\#6547](https://github.com/netdata/netdata/pull/6547) ([ilyam8](https://github.com/ilyam8)) +- redirect after clicking Nodes \> SignIn [\#6544](https://github.com/netdata/netdata/pull/6544) ([jacekkolasa](https://github.com/jacekkolasa)) +- smartd\_log: Disk \_\_eq\_\_ fix [\#6540](https://github.com/netdata/netdata/pull/6540) ([ilyam8](https://github.com/ilyam8)) +- minor - code readability HTTP CODES as defines && clear warnings [\#6539](https://github.com/netdata/netdata/pull/6539) ([underhood](https://github.com/underhood)) +- \[ci skip\] minor/vanity - add self.name\(\) to contrib.md [\#6538](https://github.com/netdata/netdata/pull/6538) ([underhood](https://github.com/underhood)) +- Docs: Remove text about nightly version [\#6534](https://github.com/netdata/netdata/pull/6534) ([joelhans](https://github.com/joelhans)) +- Documentation navigation fix [\#6533](https://github.com/netdata/netdata/pull/6533) ([joelhans](https://github.com/joelhans)) +- .travis.yml: Fix some yamllint errors [\#6526](https://github.com/netdata/netdata/pull/6526) ([knatsakis](https://github.com/knatsakis)) +- mongodb: change `password` to `pass` in the module config [\#6518](https://github.com/netdata/netdata/pull/6518) ([ilyam8](https://github.com/ilyam8)) +- Fixed broken left navbar links in translated docs [\#6505](https://github.com/netdata/netdata/pull/6505) ([cakrit](https://github.com/cakrit)) +- Update CLA with intention to keep netdata FOSS [\#6504](https://github.com/netdata/netdata/pull/6504) ([cakrit](https://github.com/cakrit)) +- netdata/docs: Add @joelhans as co-owner on documentation [\#6501](https://github.com/netdata/netdata/pull/6501) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add support for plain text only emails [\#6485](https://github.com/netdata/netdata/pull/6485) ([leo-lb](https://github.com/leo-lb)) +- netdata/packaging: Enable built-in support for prometheus remote write in packaging [\#6480](https://github.com/netdata/netdata/pull/6480) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Fix the snappy library check [\#6479](https://github.com/netdata/netdata/pull/6479) ([vlvkobal](https://github.com/vlvkobal)) +- Add a statement about permissions for the diskspace plugin [\#6474](https://github.com/netdata/netdata/pull/6474) ([vlvkobal](https://github.com/vlvkobal)) +- Get user and group names from files [\#6472](https://github.com/netdata/netdata/pull/6472) ([vlvkobal](https://github.com/vlvkobal)) +- Fix parsing SSL ACL along with others [\#6468](https://github.com/netdata/netdata/pull/6468) ([thiagoftsm](https://github.com/thiagoftsm)) +- update Nginx guide with changes [\#6466](https://github.com/netdata/netdata/pull/6466) ([prhomhyse](https://github.com/prhomhyse)) +- netdata/packaging: Binary distributions - clean up .DEB package generation process [\#6465](https://github.com/netdata/netdata/pull/6465) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- python sensors collector: sensor chips filtering fix 
[\#6463](https://github.com/netdata/netdata/pull/6463) ([ilyam8](https://github.com/ilyam8)) +- Fix broken links [\#6461](https://github.com/netdata/netdata/pull/6461) ([TheLovinator1](https://github.com/TheLovinator1)) +- json function could create overflow [\#6460](https://github.com/netdata/netdata/pull/6460) ([thiagoftsm](https://github.com/thiagoftsm)) +- Fix nodes menu sizing \(responsive\) [\#6455](https://github.com/netdata/netdata/pull/6455) ([builat](https://github.com/builat)) +- Add netdata haproxy documentation page [\#6454](https://github.com/netdata/netdata/pull/6454) ([johnramsden](https://github.com/johnramsden)) +- Fix CRC and I/O error handling in dbengine [\#6452](https://github.com/netdata/netdata/pull/6452) ([mfundul](https://github.com/mfundul)) +- Stop docs icon from linking to streaming page instead of docs root [\#6445](https://github.com/netdata/netdata/pull/6445) ([joelhans](https://github.com/joelhans)) +- Add more supported backends to the documentation [\#6443](https://github.com/netdata/netdata/pull/6443) ([vlvkobal](https://github.com/vlvkobal)) +- netdata/packaging: Remove Ventureer from demo sites [\#6442](https://github.com/netdata/netdata/pull/6442) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Safer container names [\#6441](https://github.com/netdata/netdata/pull/6441) ([ViViDboarder](https://github.com/ViViDboarder)) +- Update docs health monitoring and health management api [\#6435](https://github.com/netdata/netdata/pull/6435) ([jghaanstra](https://github.com/jghaanstra)) +- Fix issue with HTML docs generation [\#6433](https://github.com/netdata/netdata/pull/6433) ([cakrit](https://github.com/cakrit)) +- rethinkdb collector new driver support [\#6431](https://github.com/netdata/netdata/pull/6431) ([ilyam8](https://github.com/ilyam8)) +- New 'homepage' for documentation site [\#6428](https://github.com/netdata/netdata/pull/6428) ([joelhans](https://github.com/joelhans)) +- Utf8 Badge Fix And URL Parser International Support \(initial\) [\#6426](https://github.com/netdata/netdata/pull/6426) ([underhood](https://github.com/underhood)) +- Styling improvements to documentation [\#6425](https://github.com/netdata/netdata/pull/6425) ([joelhans](https://github.com/joelhans)) +- Netdata/packaging: Add documentation for binary packages, plus draft table for distributions support [\#6422](https://github.com/netdata/netdata/pull/6422) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- netdata/packaging/doc: Update documentation dependencies [\#6421](https://github.com/netdata/netdata/pull/6421) ([paulkatsoulakis](https://github.com/paulkatsoulakis)) +- Add global configuration option for zero metrics [\#6419](https://github.com/netdata/netdata/pull/6419) ([vlvkobal](https://github.com/vlvkobal)) +- Updated logos in the infographic and remaining favicons [\#6417](https://github.com/netdata/netdata/pull/6417) ([cakrit](https://github.com/cakrit)) +- SSL vs. 
TLS consistency and clarification in documentation [\#6414](https://github.com/netdata/netdata/pull/6414) ([joelhans](https://github.com/joelhans)) +- Add more codeowners to the core [\#6413](https://github.com/netdata/netdata/pull/6413) ([vlvkobal](https://github.com/vlvkobal)) +- Add news of v1.16.0 to main README [\#6411](https://github.com/netdata/netdata/pull/6411) ([cakrit](https://github.com/cakrit)) +- Update Running-behind-apache.md [\#6406](https://github.com/netdata/netdata/pull/6406) ([Steve8291](https://github.com/Steve8291)) +- Fix Web API Health documentation [\#6404](https://github.com/netdata/netdata/pull/6404) ([thiagoftsm](https://github.com/thiagoftsm)) +- Snapshot uniqueId fix [\#6400](https://github.com/netdata/netdata/pull/6400) ([jacekkolasa](https://github.com/jacekkolasa)) +- Make use of GCC's \_\_attribute\_\_\(\(unused\)\) [\#6392](https://github.com/netdata/netdata/pull/6392) ([ac000](https://github.com/ac000)) +- Change default installation to stable in documentation [\#6388](https://github.com/netdata/netdata/pull/6388) ([joelhans](https://github.com/joelhans)) +- Daemon fix double kills of collection threads on shutdown [\#6387](https://github.com/netdata/netdata/pull/6387) ([emmrk](https://github.com/emmrk)) +- Add apps grouping debug messages [\#6375](https://github.com/netdata/netdata/pull/6375) ([vlvkobal](https://github.com/vlvkobal)) +- Reimplemented mypopen\(\) function family [\#6339](https://github.com/netdata/netdata/pull/6339) ([mfundul](https://github.com/mfundul)) +- ZRAM info collector module \(proc.plugin\) [\#6276](https://github.com/netdata/netdata/pull/6276) ([RaZeR-RBI](https://github.com/RaZeR-RBI)) +- Url parser refactoring [\#6247](https://github.com/netdata/netdata/pull/6247) ([thiagoftsm](https://github.com/thiagoftsm)) + ## [v1.16.0](https://github.com/netdata/netdata/tree/v1.16.0) (2019-07-08) **Fixed bugs:** @@ -774,7 +895,6 @@ - python.d/dockerd plugin update error [\#5200](https://github.com/netdata/netdata/issues/5200) - Netdata registry with basic auth \(behind nginx proxy\) results in error 409 [\#5180](https://github.com/netdata/netdata/issues/5180) - alarm-notify.sh: WARNING: Cannot find file [\#5136](https://github.com/netdata/netdata/issues/5136) -- Netdata w/ Docker Container not show Disk space utilization for mounts [\#5071](https://github.com/netdata/netdata/issues/5071) - zfs charts appear, even when they are zero [\#4115](https://github.com/netdata/netdata/issues/4115) - Ceph - No JSON object could be decoded [\#3563](https://github.com/netdata/netdata/issues/3563) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bfab928..4d631f2f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -320,6 +320,7 @@ set(LIBNETDATA_FILES libnetdata/json/jsmn.h libnetdata/health/health.c libnetdata/health/health.h + libnetdata/string/utf8.h libnetdata/socket/security.c libnetdata/socket/security.h) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 0565644e..1cb02caa 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -28,8 +28,9 @@ The Contributor (*you*) hereby assigns netdata Inc. copyright in his contributions, to be licensed under the same terms as the rest of the code. > *Note: this means we may re-license netdata (your contributions included) -> any way we see fit, without asking your permission. -> Open-source licenses have significant differences and in our attempt to +> any way we see fit, without asking your permission. +> We intend to keep the netdata agent forever FOSS. 
+> But open-source licenses have significant differences and in our attempt to > help netdata grow we may have to distribute it under a different license. > For example, CNCF, the Cloud Native Computing Foundation, requires netdata > to be licensed under Apache-2.0 for it to be accepted as a member of the @@ -125,5 +126,6 @@ username|name|email (optional) @adherzog|Adam Herzog|adam@adamherzog.com @skrzyp1|Jerzy S.| @akwan|Alan Kwan| +@underhood|Timotej Šiškovič| [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md new file mode 100644 index 00000000..df18a6d4 --- /dev/null +++ b/DOCUMENTATION.md @@ -0,0 +1,53 @@ +# Netdata Documentation + +**Netdata is real-time health monitoring and performance troubleshooting for systems and applications.** It helps you instantly diagnose slowdowns and anomalies in your infrastructure with thousands of metrics, interactive visualizations, and insightful health alarms. + + +## Navigating the Netdata documentation + +Welcome! You've arrived at the documentation for Netdata. Use the links below to find answers to the most common questions about Netdata, such as how to install it, getting started guides, basic configuration, and adding more charts. Or, explore all of Netdata's documentation using the table of contents to your left. + +<div class="homepage-nav"> + + <div class="nav-install"> + <a class="nav-button" href="packaging/installer/#one-line-installation">One-line installation</a> + <p>Use our completely automated one-line installation process to get Netdata on all Linux distributions. Or, find detailed instructions for binary packages, Kubernetes, macOS, and more.</p> + + </div> + <div class="nav-getting-started"> + <a class="nav-button" href="docs/GettingStarted/">Getting started guide</a> + <p>The perfect place for Netdata beginners to start. Learn how to access Netdata's dashboard, start and stop the service, basic configuration, and more.</p> + + </div> + <div class="nav-configuration"> + <a class="nav-button" href="docs/configuration-guide/">Configuration guide</a> + <p>Take your configuration options from the <em>getting started guide</em> to the next level. Increase metrics retention, modify how charts are displayed, disable collectors, and modify alarms.</p> + </div> + +</div> + +**Advanced users**: For those who already understand how to access a Netdata dashboard and perform basic configuration, feel free to see what's behind any of these other doors. + + - [Netdata Behind Nginx](docs/Running-behind-nginx.md): Use an Nginx web server instead of Netdata's built-in server to enable TLS, HTTPS, and basic authentication. + - [Add More Charts](docs/Add-more-charts-to-netdata.md): Enable new internal or external plugins and understand when auto-detection works. + - [Performance](docs/Performance.md): Tips on running Netdata on devices with limited CPU and RAM resources, such as embedded devices, IoT, and edge devices. + - [Streaming](streaming/): Information for those who want to centralize Netdata metrics from any number of distributed agents. + - [Backends](backends/): Learn how to archive Netdata's real-time metrics to a time-series database (like Prometheus) for long-term storage. 
+ + +Visit the [contributing](CONTRIBUTING.md) page to find guides about the Netdata code of conduct, our community, and how you can get started contributing to Netdata. + + +## Subscribe for news and tips from monitoring pros + +<script charset="utf-8" type="text/javascript" src="//js.hsforms.net/forms/shell.js"></script> +<script> + hbspt.forms.create({ + portalId: "4567453", + formId: "6a20deb5-a1e6-4312-9c4d-f6862f947fe0" +}); +</script> + +--- + +![A GIF of the standard Netdata dashboard](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif)
\ No newline at end of file diff --git a/Makefile.am b/Makefile.am index bc928bba..de161c84 100644 --- a/Makefile.am +++ b/Makefile.am @@ -167,6 +167,7 @@ LIBNETDATA_FILES = \ libnetdata/json/jsmn.h \ libnetdata/health/health.c \ libnetdata/health/health.h \ + libnetdata/string/utf8.h \ $(NULL) APPS_PLUGIN_FILES = \ diff --git a/README.md b/README.md --- a/README.md +++ b/README.md @@ -74,22 +74,24 @@ When you install multiple Netdata, they are integrated into **one distributed ap *in the last 24 hours:*<br/> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) ## Quick Start +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) -You can quickly install Netdata on a Linux box (physical, virtual, container, IoT) with the following command: +To install Netdata from source on any Linux system (physical, virtual, container, IoT, edge) and keep it up to date with our **nightly releases** automatically, run the following: -```sh +```bash # make sure you run `bash` for your shell bash -# install Netdata, directly from github sources +# install Netdata directly from GitHub source bash <(curl -Ss https://my-netdata.io/kickstart.sh) ``` -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) + +To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](packaging/installer/README.md#nightly-vs-stable-releases). The above command will: -1. install any required packages on your system (it will ask you to confirm before doing so), -2. compile it, install it and start it +- Install any required packages on your system (it will ask you to confirm before doing so). +- Compile it, install it, and start it. More installation methods and additional options can be found at the [installation page](packaging/installer/#installation). 
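Once the kickstart run completes, it is worth confirming that the agent actually came up. A minimal check, assuming a systemd-based host and the default dashboard port of `19999` (both are assumptions; adjust for your init system and configuration):

```bash
# Confirm the netdata service is running (systemd hosts)
sudo systemctl status netdata

# The local dashboard and REST API should answer on the default port
curl -Ss http://localhost:19999/api/v1/info
```

If the `curl` call returns JSON describing the agent, the installation is healthy and the dashboard should be reachable in a browser at the same address.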
@@ -98,6 +100,8 @@ To try Netdata in a docker container, run this: ``` docker run -d --name=netdata \ -p 19999:19999 \ + -v /etc/passwd:/host/etc/passwd:ro \ + -v /etc/group:/host/etc/group:ro \ -v /proc:/host/proc:ro \ -v /sys:/host/sys:ro \ -v /var/run/docker.sock:/var/run/docker.sock:ro \ @@ -146,6 +150,32 @@ not just visualize metrics. ## News +`Jul 9th, 2019` - **[Netdata v1.16.0 released!](https://github.com/netdata/netdata/releases)** + +Release v1.16.0 contains 40 bug fixes, 31 improvements and 20 documentation updates. + +**Binary distributions.** To improve the security, speed and reliability of new netdata installations, we are delivering our own industry-standard installation method: binary package distributions. The RPM binaries for the most common OSs are already available on packagecloud and we’ll have the DEB ones available very soon. All distributions are considered Beta and, as always, we depend on our amazing community for feedback on improvements. + + - Our stable distributions are at [netdata/netdata @ packagecloud.io](https://packagecloud.io/netdata/netdata) + - The nightly builds are at [netdata/netdata-edge @ packagecloud.io](https://packagecloud.io/netdata/netdata-edge) + +**Netdata now supports TLS encryption!** You can secure the communication to the [web server](https://docs.netdata.cloud/web/server/#enabling-tls-support), the [streaming connections from slaves to the master](https://docs.netdata.cloud/streaming/#securing-the-communication) and the connection to an [openTSDB backend](https://docs.netdata.cloud/backends/opentsdb/#https). + +**This version also brings two long-awaited features to netdata’s health monitoring:** + + - The [health management API](https://docs.netdata.cloud/web/api/health/#health-management-api) introduced in v1.12 allowed you to easily disable alarms and/or notifications while netdata was running. However, those changes were not persisted across netdata restarts. Since part of routine maintenance activities may involve completely restarting a monitoring node, netdata now saves these configurations to disk, every time you issue a command to change the silencer settings. The new [LIST command](https://docs.netdata.cloud/web/api/health/#list-silencers) of the API allows you to view at any time which alarms are currently disabled or silenced. + - A way for netdata to [repeatedly send alarm notifications](https://docs.netdata.cloud/health/#alarm-line-repeat) for some, or all active alarms, at a frequency of your choosing. As a result, you will no longer have to worry about missing a notification or forgetting about a raised alarm. The default is still to only send a single notification, so that existing users are not surprised by a different behavior. + +As always, we’ve introduced new collectors, 5 of them this time: + + - Of special interest to people with Windows servers in their infrastructure is the [WMI collector](https://docs.netdata.cloud/collectors/go.d.plugin/modules/wmi/), though we are fully aware that we need to continue our efforts to do a proper port to Windows. + - The new `perf` plugin collects system-wide CPU performance statistics from Performance Monitoring Units (PMU) using the `perf_event_open()` system call. You can read a wonderful article on why this is useful [here](http://www.brendangregg.com/blog/2017-05-09/cpu-utilization-is-wrong.html). 
+ - The other three are collectors to monitor [Dnsmasq DHCP leases](https://docs.netdata.cloud/collectors/go.d.plugin/modules/dnsmasq_dhcp/), [Riak KV servers](https://docs.netdata.cloud/collectors/python.d.plugin/riakkv/) and [Pihole instances](https://docs.netdata.cloud/collectors/go.d.plugin/modules/pihole/). + +Finally, the DB Engine introduced in v1.15.0 now uses much less memory and is more robust than before. + +--- + `May 21st, 2019` - **[Netdata v1.15.0 released!](https://github.com/netdata/netdata/releases)** Release v1.15.0 contains 11 bug fixes and 30 improvements. @@ -268,7 +298,7 @@ The result is a highly efficient, low latency system, supporting multiple reader This is a high-level overview of the Netdata feature set and architecture. Click it to interact with it (it has direct links to documentation). -[![image](https://user-images.githubusercontent.com/2662304/47672043-a47eb480-dbb9-11e8-92a4-fa422d053309.png)](https://my-netdata.io/infographic.html) +[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html) ## Features @@ -294,7 +324,7 @@ This is what you should expect from Netdata: - **Notifications**: [alerta.io](health/notifications/alerta/), [amazon sns](health/notifications/awssns/), [discordapp.com](health/notifications/discord/), [email](health/notifications/email/), [flock.com](health/notifications/flock/), [irc](health/notifications/irc/), [kavenegar.com](health/notifications/kavenegar/), [messagebird.com](health/notifications/messagebird/), [pagerduty.com](health/notifications/pagerduty/), [prowl](health/notifications/prowl/), [pushbullet.com](health/notifications/pushbullet/), [pushover.net](health/notifications/pushover/), [rocket.chat](health/notifications/rocketchat/), [slack.com](health/notifications/slack/), [smstools3](health/notifications/smstools3/), [syslog](health/notifications/syslog/), [telegram.org](health/notifications/telegram/), [twilio.com](health/notifications/twilio/), [web](health/notifications/web/) and [custom notifications](health/notifications/custom/). ### Integrations -- **time-series dbs** - can archive its metrics to `graphite`, `opentsdb`, `prometheus`, json document DBs, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). +- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports the **Prometheus remote write API**, which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). ## Visualization diff --git a/backends/README.md b/backends/README.md index bdc40901..ef5baa1b 100644 --- a/backends/README.md +++ b/backends/README.md @@ -33,7 +33,9 @@ X seconds (though, it can send them per second if you need it to). - **prometheus** is described at the [prometheus page](prometheus/) since it pulls data from Netdata.
- **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by - a lot of [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)) + **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**, + **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics**, + and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)) metrics are labeled in the format used by Netdata for the [plaintext prometheus protocol](prometheus/). Notes on using the remote write backend are [here](prometheus/remote_write/). diff --git a/backends/opentsdb/README.md b/backends/opentsdb/README.md index 3d57e2e1..ab1f08bd 100644 --- a/backends/opentsdb/README.md +++ b/backends/opentsdb/README.md @@ -1,7 +1,6 @@ # OpenTSDB with HTTP -Since version 1.16 the Netdata has the feature to communicate with OpenTSDB using HTTP API. To enable this channel -it is necessary to set the following options in your netdata.conf +Netdata can easily communicate with OpenTSDB using its HTTP API. To enable this channel, set the following options in your `netdata.conf`: ``` [backend] destination = localhost:4242 ``` -, in this example we are considering that OpenTSDB is running with its default port (4242). +In this example, OpenTSDB is running with its default port, which is `4242`. If you run OpenTSDB on a different port, change the `destination = localhost:4242` line accordingly. ## HTTPS -Netdata also supports sending the metrics using SSL/TLS, but OpenTDSB does not have support to safety connections, -so it will be necessary to configure a reverse-proxy to enable the HTTPS communication. After to configure your proxy the -following changes must be done in the netdata.conf: +As of [v1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0), Netdata can send metrics to OpenTSDB using TLS/SSL. Unfortunately, OpenTSDB does not support encrypted connections, so you will have to configure a reverse proxy to enable HTTPS communication between Netdata and OpenTSDB. You can set up a reverse proxy with [Nginx](../../docs/Running-behind-nginx.md). + +After your proxy is configured, make the following changes to `netdata.conf`: ``` [backend] destination = localhost:8082 ``` -In this example we used the port 8082 for our reverse proxy. +In this example, we used the port `8082` for our reverse proxy. If your reverse proxy listens on a different port, change the `destination = localhost:8082` line accordingly.
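As a rough illustration of the reverse-proxy setup described above, a minimal Nginx server block could look like the sketch below. The certificate paths, the listening port `8082`, and the upstream address `127.0.0.1:4242` are assumptions to adapt to your own deployment; the linked [Nginx guide](../../docs/Running-behind-nginx.md) covers the details:

```
server {
    # terminate TLS for Netdata's backend traffic (assumed certificate paths)
    listen 8082 ssl;
    ssl_certificate     /etc/nginx/ssl/netdata-backend.crt;
    ssl_certificate_key /etc/nginx/ssl/netdata-backend.key;

    location / {
        # forward the decrypted requests to OpenTSDB on its default port
        proxy_pass http://127.0.0.1:4242;
    }
}
```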
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index 4f2b2f8f..a757a5bd 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -191,6 +191,12 @@ double utime_fix_ratio = 1.0, cminflt_fix_ratio = 1.0, cmajflt_fix_ratio = 1.0; + +struct pid_on_target { + int32_t pid; + struct pid_on_target *next; +}; + // ---------------------------------------------------------------------------- // target // @@ -262,6 +268,8 @@ struct target { int starts_with; // if set, the compare string matches only the // beginning of the command + struct pid_on_target *root_pid; // list of aggregated pids for target debugging + struct target *target; // the one that will be reported to netdata struct target *next; }; @@ -492,6 +500,187 @@ static int all_files_size = 0; // ---------------------------------------------------------------------------- +// read users and groups from files + +struct user_or_group_id { + avl avl; + + union { + uid_t uid; + gid_t gid; + } id; + + char *name; + + int updated; + + struct user_or_group_id * next; +}; + +enum user_or_group_id_type { + USER_ID, + GROUP_ID +}; + +struct user_or_group_ids{ + enum user_or_group_id_type type; + + avl_tree index; + struct user_or_group_id *root; + + char filename[FILENAME_MAX + 1]; +}; + +int user_id_compare(void* a, void* b) { + if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid) + return -1; + + else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid) + return 1; + + else + return 0; +} + +struct user_or_group_ids all_user_ids = { + .type = USER_ID, + + .index = { + NULL, + user_id_compare + }, + + .root = NULL, + + .filename = "", +}; + +int group_id_compare(void* a, void* b) { + if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid) + return -1; + + else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid) + return 1; + + else + return 0; +} + +struct user_or_group_ids all_group_ids = { + .type = GROUP_ID, + + .index = { + NULL, + group_id_compare + }, + + .root = NULL, + + .filename = "", +}; + +int file_changed(const struct stat *statbuf, struct timespec *last_modification_time) { + if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec && + statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0; + + last_modification_time->tv_sec = statbuf->st_mtim.tv_sec; + last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec; + + return 1; +} + +int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) { + struct stat statbuf; + if(unlikely(stat(ids->filename, &statbuf))) + return 1; + else + if(likely(!file_changed(&statbuf, last_modification_time))) return 0; + + procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 1; + + size_t line, lines = procfile_lines(ff); + + for(line = 0; line < lines ;line++) { + size_t words = procfile_linewords(ff, line); + if(unlikely(words < 3)) continue; + + char *name = procfile_lineword(ff, line, 0); + if(unlikely(!name || !*name)) continue; + + char *id_string = procfile_lineword(ff, line, 2); + if(unlikely(!id_string || !*id_string)) continue; + + + struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id)); + + if(ids->type == USER_ID) + user_or_group_id->id.uid = (uid_t)str2ull(id_string); + else + 
user_or_group_id->id.gid = (gid_t)str2ull(id_string); + + user_or_group_id->name = strdupz(name); + user_or_group_id->updated = 1; + + struct user_or_group_id *existing_user_id = NULL; + + if(likely(ids->root)) + existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl *) user_or_group_id); + + if(unlikely(existing_user_id)) { + freez(existing_user_id->name); + existing_user_id->name = user_or_group_id->name; + existing_user_id->updated = 1; + freez(user_or_group_id); + } + else { + if(unlikely(avl_insert(&ids->index, (avl *) user_or_group_id) != (void *) user_or_group_id)) { + error("INTERNAL ERROR: duplicate indexing of id during realloc"); + }; + + user_or_group_id->next = ids->root; + ids->root = user_or_group_id; + } + } + + procfile_close(ff); + + // remove unused ids + struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL; + + while(user_or_group_id) { + if(unlikely(!user_or_group_id->updated)) { + if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl *) user_or_group_id) != user_or_group_id)) + error("INTERNAL ERROR: removal of unused id from index, removed a different id"); + + if(prev_user_id) + prev_user_id->next = user_or_group_id->next; + else + ids->root = user_or_group_id->next; + + freez(user_or_group_id->name); + freez(user_or_group_id); + + if(prev_user_id) + user_or_group_id = prev_user_id->next; + else + user_or_group_id = ids->root; + } + else { + user_or_group_id->updated = 0; + + prev_user_id = user_or_group_id; + user_or_group_id = user_or_group_id->next; + } + } + + return 0; +} + +// ---------------------------------------------------------------------------- // apps_groups.conf // aggregate all processes in groups, to have a limited number of dimensions @@ -508,11 +697,27 @@ static struct target *get_users_target(uid_t uid) { snprintfz(w->id, MAX_NAME, "%u", uid); w->idhash = simple_hash(w->id); - struct passwd *pw = getpwuid(uid); - if(!pw || !pw->pw_name || !*pw->pw_name) - snprintfz(w->name, MAX_NAME, "%u", uid); - else - snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + struct user_or_group_id user_id_to_find, *user_or_group_id = NULL; + user_id_to_find.id.uid = uid; + + if(*netdata_configured_host_prefix) { + static struct timespec last_passwd_modification_time; + int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time); + + if(likely(!ret && all_user_ids.index.root)) + user_or_group_id = (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl *) &user_id_to_find); + } + + if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name) { + snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name); + } + else { + struct passwd *pw = getpwuid(uid); + if(!pw || !pw->pw_name || !*pw->pw_name) + snprintfz(w->name, MAX_NAME, "%u", uid); + else + snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + } netdata_fix_chart_name(w->name); @@ -540,11 +745,27 @@ struct target *get_groups_target(gid_t gid) snprintfz(w->id, MAX_NAME, "%u", gid); w->idhash = simple_hash(w->id); - struct group *gr = getgrgid(gid); - if(!gr || !gr->gr_name || !*gr->gr_name) - snprintfz(w->name, MAX_NAME, "%u", gid); - else - snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); + struct user_or_group_id group_id_to_find, *group_id = NULL; + group_id_to_find.id.gid = gid; + + if(*netdata_configured_host_prefix) { + static struct timespec last_group_modification_time; + int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time); + + if(likely(!ret && all_group_ids.index.root)) + group_id
= (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl *) &group_id_to_find); + } + + if(group_id && group_id->name && *group_id->name) { + snprintfz(w->name, MAX_NAME, "%s", group_id->name); + } + else { + struct group *gr = getgrgid(gid); + if(!gr || !gr->gr_name || !*gr->gr_name) + snprintfz(w->name, MAX_NAME, "%u", gid); + else + snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); + } netdata_fix_chart_name(w->name); @@ -2006,7 +2227,7 @@ static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t tim return indent + 1; } -static inline void debug_print_process_tree(struct pid_stat *p, char *msg) { +static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) { debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited"); debug_print_process_and_parents(p, p->stat_collected_usec); } @@ -2657,6 +2878,18 @@ static size_t zero_all_targets(struct target *root) { w->openeventpolls = 0; w->openother = 0; } + + if(unlikely(w->root_pid)) { + struct pid_on_target *pid_on_target_to_free, *pid_on_target = w->root_pid; + + while(pid_on_target) { + pid_on_target_to_free = pid_on_target; + pid_on_target = pid_on_target->next; + free(pid_on_target_to_free); + } + + w->root_pid = NULL; + } } return count; @@ -2799,8 +3032,14 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, w->processes++; w->num_threads += p->num_threads; - if(unlikely(debug_enabled || w->debug_enabled)) + if(unlikely(debug_enabled || w->debug_enabled)) { debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt); + + struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target)); + pid_on_target->pid = p->pid; + pid_on_target->next = w->root_pid; + w->root_pid = pid_on_target; + } } static void calculate_netdata_statistics(void) { @@ -3321,6 +3560,18 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type for(w = root ; w ; w = w->next) { if (w->target) continue; + if(unlikely(w->processes && (debug_enabled || w->debug_enabled))) { + struct pid_on_target *pid_on_target; + + fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes, (w->processes == 1)?"":"es"); + + for(pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) { + fprintf(stderr, " %d", pid_on_target->pid); + } + + fputc('\n', stderr); + } + if (!w->exposed && w->processes) { newly_added++; w->exposed = 1; @@ -3788,6 +4039,12 @@ int main(int argc, char **argv) { info("started on pid %d", getpid()); + snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix); + debug_log("passwd file: '%s'", all_user_ids.filename); + + snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix); + debug_log("group file: '%s'", all_group_ids.filename); + #if (ALL_PIDS_ARE_READ_INSTANTLY == 0) all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max); #endif diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md index 
c01f9ec0..6ec9024d 100644 --- a/collectors/cgroups.plugin/README.md +++ b/collectors/cgroups.plugin/README.md @@ -110,6 +110,8 @@ By default, Netdata will enable monitoring metrics only when they are not zero. enable memory (used mem including cache) = yes ``` +You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins. + ### alarms CPU and memory limits are watched and used to raise alarms. Memory usage for every cgroup is checked against the `ram` and `ram+swap` limits. CPU usage for every cgroup is checked against the `cpuset.cpus` limit and the `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned to the cgroup. Configuration for the alarms is available in the `health.d/cgroups.conf` file. diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in index 48f52388..784c0604 100755 --- a/collectors/cgroups.plugin/cgroup-name.sh.in +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -53,18 +53,25 @@ function docker_get_name_classic() { } function docker_get_name_api() { - local id="${1}" - if [ ! -S "${DOCKER_HOST}" ]; then - warning "Can't find ${DOCKER_HOST}" + local path="/containers/${1}/json" + if [ -z "${DOCKER_HOST}" ]; then + warning "No DOCKER_HOST is set" return 1 fi if ! command -v jq >/dev/null 2>&1; then warning "Can't find jq command line tool. jq is required for netdata to retrieve docker container name using ${DOCKER_HOST} API, falling back to docker ps" return 1 fi - - info "Running API command: /containers/${id}/json" - JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\\r\\n" | nc -U "${DOCKER_HOST}" | grep '^{.*') + if [ -S "${DOCKER_HOST}" ]; then + info "Running API command: curl --unix-socket ${DOCKER_HOST} http://localhost${path}" + JSON=$(curl -sS --unix-socket "${DOCKER_HOST}" "http://localhost${path}") + elif [ "${DOCKER_HOST}" == "/var/run/docker.sock" ]; then + warning "Docker socket was not found at ${DOCKER_HOST}" + return 1 + else + info "Running API command: curl ${DOCKER_HOST}${path}" + JSON=$(curl -sS "${DOCKER_HOST}${path}") + fi NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') return 0 } diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c index 87cb5ef1..d4f99006 100644 --- a/collectors/cgroups.plugin/cgroup-network.c +++ b/collectors/cgroups.plugin/cgroup-network.c @@ -86,7 +86,7 @@ unsigned int read_iface_ifindex(const char *prefix, const char *iface) { return (unsigned int)ifindex; } -struct iface *read_proc_net_dev(const char *scope, const char *prefix) { +struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *prefix) { if(!prefix) prefix = ""; procfile *ff = NULL; diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 4300788d..d9d130f7 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -575,7 +575,8 @@ static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) { cp->updated = 1; - if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system))) + if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && + (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) cp->enabled = CONFIG_BOOLEAN_YES; } } @@ -611,7 +612,8 @@ static inline void cgroup2_read_cpuacct_stat(struct cpuacct_stat *cp) { cp->updated = 1; - if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || 
cp->system))) + if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && + (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) cp->enabled = CONFIG_BOOLEAN_YES; } } @@ -668,7 +670,8 @@ static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) { ca->updated = 1; - if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total)) + if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && + (total || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) ca->enabled = CONFIG_BOOLEAN_YES; } } @@ -737,7 +740,7 @@ static inline void cgroup_read_blkio(struct blkio *io) { io->updated = 1; if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(io->Read || io->Write)) + if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) io->enabled = CONFIG_BOOLEAN_YES; else io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; @@ -787,7 +790,7 @@ static inline void cgroup2_read_blkio(struct blkio *io, unsigned int word_offset io->updated = 1; if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(io->Read || io->Write)) + if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) io->enabled = CONFIG_BOOLEAN_YES; else io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; @@ -881,7 +884,8 @@ static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unif if(( (!parent_cg_is_unified) && ( mem->total_cache || mem->total_dirty || mem->total_rss || mem->total_rss_huge || mem->total_mapped_file || mem->total_writeback || mem->total_swap || mem->total_pgpgin || mem->total_pgpgout || mem->total_pgfault || mem->total_pgmajfault)) || (parent_cg_is_unified && ( mem->anon || mem->total_dirty || mem->kernel_stack || mem->slab || mem->sock || mem->total_writeback - || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault))) + || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault)) + || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES) mem->enabled_detailed = CONFIG_BOOLEAN_YES; else mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations; @@ -893,14 +897,16 @@ memory_next: // read usage_in_bytes if(likely(mem->filename_usage_in_bytes)) { mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes); - if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes)) + if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && + (mem->usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES; } // read msw_usage_in_bytes if(likely(mem->filename_msw_usage_in_bytes)) { mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes); - if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes)) + if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && + (mem->msw_usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES; } @@ -913,10 +919,10 @@ memory_next: else { mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt); if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(!mem->failcnt)) - mem->delay_counter_failcnt = 
cgroup_recheck_zero_mem_failcnt_every_iterations; - else + if(unlikely(mem->failcnt || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) mem->enabled_failcnt = CONFIG_BOOLEAN_YES; + else + mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations; } } } diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md index 8f859e35..ff98a874 100644 --- a/collectors/diskspace.plugin/README.md +++ b/collectors/diskspace.plugin/README.md @@ -1,6 +1,6 @@ # diskspace.plugin -This plugin monitors the disk space usage of mounted disks, under Linux. +This plugin monitors the disk space usage of mounted disks under Linux. The plugin requires Netdata to have execute/search permissions on the mount point itself, as well as on each component of the absolute path to the mount point. Two charts are available for every mount: - Disk Space Usage @@ -10,7 +10,7 @@ Two charts are available for every mount: Simple patterns can be used to exclude mounts from the shown statistics based on path or filesystem. By default, read-only mounts are not displayed. To display them, set `yes` for a chart instead of `auto`. -By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected and charts will be automatically added to the dashboard (though a refresh of the dashboard is needed for them to appear). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins. 
``` diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c index 77b87b09..eab607d8 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.c +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -249,7 +249,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { int rendered = 0; - if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (bavail || breserved_root || bused))) { + if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && + (bavail || breserved_root || bused || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!m->st_space)) { m->do_space = CONFIG_BOOLEAN_YES; m->st_space = rrdset_find_bytype_localhost("disk_space", disk); @@ -289,7 +291,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { // -------------------------------------------------------------------------- - if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (favail || freserved_root || fused))) { + if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && + (favail || freserved_root || fused || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!m->st_inodes)) { m->do_inodes = CONFIG_BOOLEAN_YES; m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk); diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md index 237e6092..618e053d 100644 --- a/collectors/freebsd.plugin/README.md +++ b/collectors/freebsd.plugin/README.md @@ -2,4 +2,6 @@ Collects resource usage and performance data on FreeBSD systems +By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected and charts will be automatically added to the dashboard (though a refresh of the dashboard is needed for them to appear). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins. 
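Concretely, the `enable zero metrics` option described above is a one-line change in `netdata.conf`. A minimal fragment, assuming the stock section layout, would be:

```
[global]
    # render charts even while all of their metrics are zero
    enable zero metrics = yes
```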
+ [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c index 81a301e4..910def59 100644 --- a/collectors/freebsd.plugin/freebsd_devstat.c +++ b/collectors/freebsd.plugin/freebsd_devstat.c @@ -352,7 +352,8 @@ int do_kern_devstat(int update_every, usec_t dt) { if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO && (dstat[i].bytes[DEVSTAT_READ] || dstat[i].bytes[DEVSTAT_WRITE] || - dstat[i].bytes[DEVSTAT_FREE]))) { + dstat[i].bytes[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_io)) { dm->st_io = rrdset_create_localhost("disk", disk, @@ -389,7 +390,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_ops)) { dm->st_ops = rrdset_create_localhost("disk_ops", disk, @@ -428,7 +430,9 @@ int do_kern_devstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO && - (dstat[i].start_count || dstat[i].end_count))) { + (dstat[i].start_count || + dstat[i].end_count || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_qops)) { dm->st_qops = rrdset_create_localhost("disk_qops", disk, @@ -457,7 +461,8 @@ int do_kern_devstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO && - cur_dstat.busy_time_ms)) { + (cur_dstat.busy_time_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_util)) { dm->st_util = rrdset_create_localhost("disk_util", disk, @@ -490,7 +495,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (cur_dstat.duration_read_ms || cur_dstat.duration_write_ms || cur_dstat.duration_other_ms || - cur_dstat.duration_free_ms))) { + cur_dstat.duration_free_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_iotime)) { dm->st_iotime = rrdset_create_localhost("disk_iotime", disk, @@ -538,7 +544,8 @@ int do_kern_devstat(int update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_await)) { dm->st_await = rrdset_create_localhost("disk_await", disk, @@ -603,7 +610,8 @@ int do_kern_devstat(int update_every, usec_t dt) { if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO && (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_avagsz)) { dm->st_avagsz = rrdset_create_localhost("disk_avgsz", disk, @@ -660,7 +668,8 @@ int do_kern_devstat(int 
update_every, usec_t dt) { (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE] || dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { + dstat[i].operations[DEVSTAT_FREE] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!dm->st_svctm)) { dm->st_svctm = rrdset_create_localhost("disk_svctm", disk, diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c index ac1638ee..7e2293e4 100644 --- a/collectors/freebsd.plugin/freebsd_getifaddrs.c +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -440,7 +440,9 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ibytes) || IFA_DATA(obytes)))) { + (IFA_DATA(ibytes) || + IFA_DATA(obytes) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_bandwidth)) { ifm->st_bandwidth = rrdset_create_localhost("net", ifa->ifa_name, @@ -469,7 +471,11 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) { + (IFA_DATA(ipackets) || + IFA_DATA(opackets) || + IFA_DATA(imcasts) || + IFA_DATA(omcasts) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_packets)) { ifm->st_packets = rrdset_create_localhost("net_packets", ifa->ifa_name, @@ -508,7 +514,9 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) { + (IFA_DATA(ierrors) || + IFA_DATA(oerrors) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_errors)) { ifm->st_errors = rrdset_create_localhost("net_errors", ifa->ifa_name, @@ -538,11 +546,11 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(iqdrops) + (IFA_DATA(iqdrops) || #if __FreeBSD__ >= 11 - || IFA_DATA(oqdrops) -#endif - ))) { + IFA_DATA(oqdrops) || + #endif + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_drops)) { ifm->st_drops = rrdset_create_localhost("net_drops", ifa->ifa_name, @@ -577,7 +585,8 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO && - IFA_DATA(collisions))) { + (IFA_DATA(collisions) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!ifm->st_events)) { ifm->st_events = rrdset_create_localhost("net_events", ifa->ifa_name, diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c index d050c627..58b67a3c 100644 --- a/collectors/freebsd.plugin/freebsd_getmntinfo.c +++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c @@ -216,7 +216,9 @@ int do_getmntinfo(int update_every, usec_t dt) { int rendered = 0; - if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && 
(mntbuf[i].f_blocks > 2))) { + if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && + (mntbuf[i].f_blocks > 2 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_space)) { snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); @@ -255,7 +257,9 @@ int do_getmntinfo(int update_every, usec_t dt) { // -------------------------------------------------------------------------- - if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) { + if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && + (mntbuf[i].f_files > 1 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_inodes)) { snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c index b56fdc07..402813fe 100644 --- a/collectors/freebsd.plugin/freebsd_sysctl.c +++ b/collectors/freebsd.plugin/freebsd_sysctl.c @@ -1941,7 +1941,13 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop || tcpstat.tcps_finwait2_drops))) { + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvpackafterwin || + tcpstat.tcps_rcvafterclose || + tcpstat.tcps_rcvmemdrop || + tcpstat.tcps_persistdrop || + tcpstat.tcps_finwait2_drops || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -1982,7 +1988,9 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvoopack || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2014,7 +2022,11 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_sc_sendcookie || + tcpstat.tcps_sc_recvcookie || + tcpstat.tcps_sc_zonefail || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syncookies = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2050,7 +2062,9 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) { + if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_listendrop || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_listen 
= CONFIG_BOOLEAN_YES; static RRDSET *st_listen = NULL; @@ -2085,7 +2099,11 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_ce || tcpstat.tcps_ecn_ect0 || tcpstat.tcps_ecn_ect1))) { + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_ecn_ce || + tcpstat.tcps_ecn_ect0 || + tcpstat.tcps_ecn_ect1 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2626,8 +2644,11 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + (ip6stat.ip6s_localout || + ip6stat.ip6s_total || + ip6stat.ip6s_forward || + ip6stat.ip6s_delivered || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2666,8 +2687,10 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { + (ip6stat.ip6s_fragmented || + ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2706,8 +2729,11 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + (ip6stat.ip6s_reassembled || + ip6stat.ip6s_fragdropped || + ip6stat.ip6s_fragtimeout || + ip6stat.ip6s_fragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2747,16 +2773,17 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2872,7 +2899,10 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || 
icmp6_total.msgs_out))) { + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && + (icmp6_total.msgs_in || + icmp6_total.msgs_out || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6 = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2907,7 +2937,10 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_REDIRECT] || + icmp6stat.icp6s_outhist[ND_REDIRECT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_redir = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -2941,18 +2974,19 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3005,11 +3039,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_echos = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3047,11 +3082,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == 
CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_router = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3090,11 +3126,12 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_neighbor = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -3133,17 +3170,18 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[136]))) { + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md index 3e2554e4..fcb4670e 100644 --- a/collectors/macos.plugin/README.md +++ b/collectors/macos.plugin/README.md @@ -2,4 +2,6 @@ Collects resource usage and performance data on MacOS systems +By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected and charts will be automatically added to the dashboard (though a refresh of the dashboard is needed for them to appear). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins. 
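All of the collector changes above implement the same gating pattern: a chart configured as `auto` is latched on the first time its counters become non-zero, or immediately when zero-metrics charts are globally enabled. A minimal self-contained sketch of that decision follows; the types and names (`config_boolean`, `chart_is_enabled`, `zero_metrics_enabled`) are simplified stand-ins for illustration, where the real code uses Netdata's `CONFIG_BOOLEAN_*` constants and per-chart state:

```c
#include <stdio.h>

/* simplified stand-in for Netdata's config booleans (assumption, not the real type) */
typedef enum { BOOLEAN_NO, BOOLEAN_YES, BOOLEAN_AUTO } config_boolean;

/* stand-in for the [global] "enable zero metrics" setting */
static config_boolean zero_metrics_enabled = BOOLEAN_NO;

/* decide whether a chart should be rendered for this iteration */
static int chart_is_enabled(config_boolean *chart_setting, unsigned long long value) {
    if(*chart_setting == BOOLEAN_YES)
        return 1;

    /* in AUTO mode, latch to YES on the first non-zero value,
       or immediately when zero-metrics charts are globally enabled */
    if(*chart_setting == BOOLEAN_AUTO &&
       (value || zero_metrics_enabled == BOOLEAN_YES)) {
        *chart_setting = BOOLEAN_YES;
        return 1;
    }

    return 0;
}

int main(void) {
    config_boolean do_io = BOOLEAN_AUTO;
    printf("zero value  -> %d\n", chart_is_enabled(&do_io, 0)); /* 0: chart stays off */
    printf("first datum -> %d\n", chart_is_enabled(&do_io, 7)); /* 1: latched on */
    printf("zero again  -> %d\n", chart_is_enabled(&do_io, 0)); /* 1: stays on */
    return 0;
}
```

Note the latch: once a chart switches to `YES`, it keeps rendering even if the metric returns to zero, which matches the README text above about charts being added permanently once values appear.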
+ [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c index a8af72e6..dddafc9f 100644 --- a/collectors/macos.plugin/macos_sysctl.c +++ b/collectors/macos.plugin/macos_sysctl.c @@ -479,7 +479,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop))) { + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvpackafterwin || + tcpstat.tcps_rcvafterclose || + tcpstat.tcps_rcvmemdrop || + tcpstat.tcps_persistdrop || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpconnaborts"); if (unlikely(!st)) { @@ -514,7 +519,9 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_rcvoopack || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpofo"); if (unlikely(!st)) { @@ -543,7 +550,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_sc_sendcookie || + tcpstat.tcps_sc_recvcookie || + tcpstat.tcps_sc_zonefail || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syscookies = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.tcpsyncookies"); @@ -579,7 +590,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100) - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_recv_ce || tcpstat.tcps_ecn_not_supported))) { + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (tcpstat.tcps_ecn_recv_ce || + tcpstat.tcps_ecn_not_supported || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv4.ecnpkts"); if (unlikely(!st)) { @@ -980,8 +994,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { error("DISABLED: ipv6.errors"); } else { if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + (ip6stat.ip6s_localout || + ip6stat.ip6s_total || + ip6stat.ip6s_forward || + ip6stat.ip6s_delivered || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_packets = 
CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.packets"); if (unlikely(!st)) { @@ -1017,8 +1034,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { + (ip6stat.ip6s_fragmented || + ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsout = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.fragsout"); if (unlikely(!st)) { @@ -1053,8 +1072,11 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + (ip6stat.ip6s_reassembled || + ip6stat.ip6s_fragdropped || + ip6stat.ip6s_fragtimeout || + ip6stat.ip6s_fragments || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_fragsin = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.fragsin"); if (unlikely(!st)) { @@ -1090,16 +1112,17 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip6_errors = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.errors"); if (unlikely(!st)) { @@ -1158,7 +1181,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; } icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && + (icmp6_total.msgs_in || + icmp6_total.msgs_out || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6 = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmp"); if (unlikely(!st)) { @@ -1189,7 +1215,10 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_REDIRECT] || + icmp6stat.icp6s_outhist[ND_REDIRECT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_redir = CONFIG_BOOLEAN_YES; st = 
rrdset_find_localhost("ipv6.icmpredir"); if (unlikely(!st)) { @@ -1220,18 +1249,19 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_errors = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmperrors"); if (unlikely(!st)) { @@ -1279,11 +1309,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_echos = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmpechos"); if (unlikely(!st)) { @@ -1318,11 +1349,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_router = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmprouter"); if (unlikely(!st)) { @@ -1357,11 +1389,12 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - 
icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_neighbor = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmpneighbor"); if (unlikely(!st)) { @@ -1396,17 +1429,18 @@ int do_macos_sysctl(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[136]))) { + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && + (icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136] || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp6_types = CONFIG_BOOLEAN_YES; st = rrdset_find_localhost("ipv6.icmptypes"); if (unlikely(!st)) { diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c index 66ec5d0e..aea2704a 100644 --- a/collectors/plugins.d/plugins_d.c +++ b/collectors/plugins.d/plugins_d.c @@ -117,6 +117,103 @@ inline int pluginsd_split_words(char *str, char **words, int max_words) { return quoted_strings_splitter(str, words, max_words, pluginsd_space); } +#ifdef ENABLE_HTTPS +/** + * Update Buffer + * + * Update the temporary buffer used to parse data received from the slave + * + * @param output is a pointer to the buffer where the data will be stored + * @param ssl is a pointer to the SSL connection with the server + * + * @return it returns the total number of bytes read on success and a non-positive number otherwise + */ +int pluginsd_update_buffer(char *output, SSL *ssl) { + ERR_clear_error(); + int bytesleft = SSL_read(ssl, output, PLUGINSD_LINE_MAX_SSL_READ); + if(bytesleft <= 0) { + int sslerrno = SSL_get_error(ssl, bytesleft); + switch(sslerrno) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_WRITE: + { + break; + } + default: + { + u_long err; + char buf[256]; + int counter = 0; + while ((err = ERR_get_error()) != 0) { + ERR_error_string_n(err, buf, sizeof(buf)); + info("%d SSL read error (%s) on socket %d ", counter++, ERR_error_string((long)SSL_get_error(ssl, bytesleft), NULL), SSL_get_fd(ssl)); + } + } + + } + } else { + output[bytesleft] = '\0'; + } + + return bytesleft; +} + +/** + * Get from Buffer + * + * Get the next line to process from the buffer + * + * @param output is the output buffer into which the next line will be copied. + * @param bytesread the number of bytes read in the previous iteration. + * @param input the input buffer holding the data to process + * @param ssl a pointer to the connection with the server + * @param src the start address of the input buffer, because sometimes the input pointer must be reset to it.
+ * + * @return It returns a pointer for the next iteration on success, or NULL otherwise. + */ +char * pluginsd_get_from_buffer(char *output, int *bytesread, char *input, SSL *ssl, char *src) { + int copying = 1; + char *endbuffer; + size_t length; + while(copying) { + if(*bytesread > 0) { + endbuffer = strchr(input, '\n'); + if(endbuffer) { + copying = 0; + endbuffer++; // advance past the '\n' so it is included in the copy + length = endbuffer - input; + *bytesread -= length; + + memcpy(output, input, length); + output += length; + *output = '\0'; + input += length; + }else { + length = strlen(input); + memcpy(output, input, length); + output += length; + input = src; + + *bytesread = pluginsd_update_buffer(input, ssl); + if(*bytesread <= 0) { + input = NULL; + copying = 0; + } + } + }else { + // the buffer is exhausted, refill it from the SSL connection + *bytesread = pluginsd_update_buffer(input, ssl); + if(*bytesread <= 0) { + input = NULL; + copying = 0; + } + } + } + + return input; +} +#endif + inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) { int enabled = cd->enabled; @@ -149,10 +246,43 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int goto cleanup; } +#ifdef ENABLE_HTTPS + int bytesleft = 0; + char tmpbuffer[PLUGINSD_LINE_MAX]; + char *readfrom; +#endif + char *r = NULL; while(!ferror(fp)) { if(unlikely(netdata_exit)) break; - char *r = fgets(line, PLUGINSD_LINE_MAX, fp); +#ifdef ENABLE_HTTPS + int normalread = 1; + if(netdata_srv_ctx) { + if(host->ssl.conn && !host->ssl.flags) { + if(!bytesleft) { + r = line; + readfrom = tmpbuffer; + bytesleft = pluginsd_update_buffer(readfrom, host->ssl.conn); + if(bytesleft <= 0) { + break; + } + } + + readfrom = pluginsd_get_from_buffer(line, &bytesleft, readfrom, host->ssl.conn, tmpbuffer); + if(!readfrom) { + r = NULL; + } + + normalread = 0; + } + } + + if(normalread) { + r = fgets(line, PLUGINSD_LINE_MAX, fp); + } +#else + r = fgets(line, PLUGINSD_LINE_MAX, fp); +#endif if(unlikely(!r)) { if(feof(fp)) error("read failed: end of file"); @@ -526,6 +656,70 @@ static void pluginsd_worker_thread_cleanup(void *arg) { } } +#define SERIAL_FAILURES_THRESHOLD 10 +static void pluginsd_worker_thread_handle_success(struct plugind *cd) { + if (likely(cd->successful_collections)) { + sleep((unsigned int) cd->update_every); + return; + } + + if(likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) { + info("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", + cd->fullfilename, cd->pid, + cd->enabled ? + "Waiting a bit before starting it again." : + "Will not start it again - it is now disabled."); + sleep((unsigned int) (cd->update_every * 10)); + return; + } + + if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0)." + " We have tried to collect something %zu times - unsuccessfully. Disabling it.", + cd->fullfilename, cd->pid, cd->serial_failures); + cd->enabled = 0; + return; + } + + return; +} + +static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) { + if (worker_ret_code == -1) { + info("'%s' (pid %d) was killed with SIGTERM. Disabling it.", cd->fullfilename, cd->pid); + cd->enabled = 0; + return; + } + + if (!cd->successful_collections) { + error("'%s' (pid %d) exited with error code %d and hasn't collected any data.
Disabling it.", + cd->fullfilename, cd->pid, worker_ret_code); + cd->enabled = 0; + return; + } + + if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", + cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, + cd->enabled ? + "Waiting a bit before starting it again." : + "Will not start it again - it is disabled."); + sleep((unsigned int) (cd->update_every * 10)); + return; + } + + if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)." + "We tried to restart it %zu times, but it failed to generate data. Disabling it.", + cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, cd->serial_failures); + cd->enabled = 0; + return; + } + + return; +} +#undef SERIAL_FAILURES_THRESHOLD + void *pluginsd_worker_thread(void *arg) { netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg); @@ -546,50 +740,14 @@ void *pluginsd_worker_thread(void *arg) { error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count); killpid(cd->pid, SIGTERM); - // get the return code - int code = mypclose(fp, cd->pid); + int worker_ret_code = mypclose(fp, cd->pid); - if(code != 0) { - // the plugin reports failure + if (likely(worker_ret_code == 0)) + pluginsd_worker_thread_handle_success(cd); + else + pluginsd_worker_thread_handle_error(cd, worker_ret_code); - if(likely(!cd->successful_collections)) { - // nothing collected - disable it - error("'%s' (pid %d) exited with error code %d. Disabling it.", cd->fullfilename, cd->pid, code); - cd->enabled = 0; - } - else { - // we have collected something - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). We tried %zu times to restart it, but it failed to generate data. Disabling it.", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->serial_failures); - cd->enabled = 0; - } - } - } - else { - // the plugin reports success - - if(unlikely(!cd->successful_collections)) { - // we have collected nothing so far - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", cd->fullfilename, cd->pid, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is now disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0), but we have tried %zu times to collect something. 
Disabling it.", cd->fullfilename, cd->pid, cd->serial_failures); - cd->enabled = 0; - } - } - else - sleep((unsigned int) cd->update_every); - } cd->pid = 0; - if(unlikely(!cd->enabled)) break; } diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h index 04d5de3d..7d5c7dda 100644 --- a/collectors/plugins.d/plugins_d.h +++ b/collectors/plugins.d/plugins_d.h @@ -31,6 +31,7 @@ #define PLUGINSD_KEYWORD_VARIABLE "VARIABLE" #define PLUGINSD_LINE_MAX 1024 +#define PLUGINSD_LINE_MAX_SSL_READ 512 #define PLUGINSD_MAX_WORDS 20 #define PLUGINSD_MAX_DIRECTORIES 20 diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md index cacde84f..9513877d 100644 --- a/collectors/proc.plugin/README.md +++ b/collectors/proc.plugin/README.md @@ -75,7 +75,7 @@ netdata will automatically set the name of disks on the dashboard, from the moun ### performance metrics -By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
netdata categorizes all block devices in 3 categories: diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c index cd467948..eee0cbe7 100644 --- a/collectors/proc.plugin/proc_diskstats.c +++ b/collectors/proc.plugin/proc_diskstats.c @@ -973,7 +973,9 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------------- // Do performance metrics - if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) { + if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && + (readsectors || writesectors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_io = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_io)) { @@ -1004,7 +1006,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes))) { + if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_ops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_ops)) { @@ -1037,7 +1040,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && queued_ios)) { + if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && + (queued_ios || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_qops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_qops)) { @@ -1068,7 +1072,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && backlog_ms)) { + if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && + (backlog_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_backlog = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_backlog)) { @@ -1099,7 +1104,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) { + if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && + (busy_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_util = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_util)) { @@ -1130,7 +1136,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && (mreads || mwrites))) { + if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && + (mreads || mwrites || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_mops = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_mops)) { @@ -1163,7 +1170,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) { + if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && + (readms || writems || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->do_iotime = CONFIG_BOOLEAN_YES; 
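The `proc_diskstats.c` hunks around this point, and every collector hunk that follows, repeat a single transformation: each chart's `auto` condition gains a `netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES` escape hatch, after which the flag is latched to yes. A minimal, self-contained C sketch of that pattern; the `CONFIG_BOOLEAN_*` values and the `chart_should_render()` helper are illustrative assumptions, not upstream code:

```c
#include <stdio.h>

/* Illustrative values; upstream defines these constants elsewhere. */
#define CONFIG_BOOLEAN_NO   0
#define CONFIG_BOOLEAN_YES  1
#define CONFIG_BOOLEAN_AUTO 2

/* Stands in for the global toggled by "enable zero metrics" in [global]. */
static int netdata_zero_metrics_enabled = CONFIG_BOOLEAN_NO;

/* The recurring condition: YES stays YES, and AUTO is promoted to YES
 * once any source counter is non-zero, or (new in this changeset) when
 * the global zero-metrics option is enabled. */
static int chart_should_render(int do_chart, unsigned long long reads, unsigned long long writes) {
    if(do_chart == CONFIG_BOOLEAN_YES || (do_chart == CONFIG_BOOLEAN_AUTO &&
        (reads || writes ||
         netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
        return CONFIG_BOOLEAN_YES;  /* collectors store this back into their do_* flag */
    return do_chart;
}

int main(void) {
    int do_ops = CONFIG_BOOLEAN_AUTO;

    /* All counters zero and the option off: the chart stays AUTO (hidden). */
    printf("option off: %d\n", chart_should_render(do_ops, 0, 0));

    /* Option on: AUTO charts are created even with all-zero counters. */
    netdata_zero_metrics_enabled = CONFIG_BOOLEAN_YES;
    printf("option on:  %d\n", chart_should_render(do_ops, 0, 0));
    return 0;
}
```

Note the latch: once a collector stores `CONFIG_BOOLEAN_YES` back into its `do_*` flag, the chart keeps updating even if the counters later drop back to zero.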
if(unlikely(!d->st_iotime)) { @@ -1199,8 +1207,12 @@ int do_proc_diskstats(int update_every, usec_t dt) { // only if this is not the first time we run if(likely(dt)) { - if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && + (readms || writems || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_await)) { d->st_await = rrdset_create_localhost( @@ -1230,8 +1242,10 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_await); } - if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && + (readsectors || writesectors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_avgsz)) { d->st_avgsz = rrdset_create_localhost( @@ -1261,8 +1275,12 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_avgsz); } - if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && + (busy_ms || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && + (reads || writes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { if(unlikely(!d->st_svctm)) { d->st_svctm = rrdset_create_localhost( @@ -1505,7 +1523,11 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_bcache_cache_read_races); } - if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_hits != 0 || stats_total_cache_misses != 0 || stats_total_cache_miss_collisions != 0))) { + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && + (stats_total_cache_hits || + stats_total_cache_misses || + stats_total_cache_miss_collisions || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!d->st_bcache)) { d->st_bcache = rrdset_create_localhost( @@ -1539,7 +1561,10 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_bcache); } - if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_bypass_hits != 0 || stats_total_cache_bypass_misses != 0))) { + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && + (stats_total_cache_bypass_hits || + stats_total_cache_bypass_misses || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if(unlikely(!d->st_bcache_bypass)) { d->st_bcache_bypass = rrdset_create_localhost( @@ -1575,7 +1600,9 @@ int do_proc_diskstats(int update_every, usec_t dt) { // ------------------------------------------------------------------------ // update the system total I/O - if(global_do_io == 
CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && (system_read_kb || system_write_kb))) { + if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && + (system_read_kb || system_write_kb || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { static RRDSET *st_io = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c index ae399c44..92135393 100644 --- a/collectors/proc.plugin/proc_meminfo.c +++ b/collectors/proc.plugin/proc_meminfo.c @@ -219,7 +219,9 @@ int do_proc_meminfo(int update_every, usec_t dt) { unsigned long long SwapUsed = SwapTotal - SwapFree; - if(do_swap == CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) { + if(do_swap == CONFIG_BOOLEAN_YES || (do_swap == CONFIG_BOOLEAN_AUTO && + (SwapTotal || SwapUsed || SwapFree || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_swap = CONFIG_BOOLEAN_YES; static RRDSET *st_system_swap = NULL; @@ -256,7 +258,10 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) { + if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && + (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && + (HardwareCorrupted > 0 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) { do_hwcorrupt = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_hwcorrupt = NULL; @@ -438,7 +443,9 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && Hugepagesize != 0 && HugePages_Total != 0)) { + if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && + ((Hugepagesize && HugePages_Total) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_hugepages = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_hugepages = NULL; @@ -479,7 +486,10 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && (AnonHugePages != 0 || ShmemHugePages != 0))) { + if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && + (AnonHugePages || + ShmemHugePages || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_transparent_hugepages = CONFIG_BOOLEAN_YES; static RRDSET *st_mem_transparent_hugepages = NULL; diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c index 1e426e97..8d9751d1 100644 --- a/collectors/proc.plugin/proc_net_dev.c +++ b/collectors/proc.plugin/proc_net_dev.c @@ -601,7 +601,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_bandwidth == CONFIG_BOOLEAN_AUTO && (d->rbytes || d->tbytes)))) + if(unlikely(d->do_bandwidth == CONFIG_BOOLEAN_AUTO && + (d->rbytes || d->tbytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_bandwidth = CONFIG_BOOLEAN_YES; if(d->do_bandwidth == CONFIG_BOOLEAN_YES) { @@ -671,7 +672,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(unlikely((d->do_packets == CONFIG_BOOLEAN_AUTO && (d->rpackets || d->tpackets || d->rmulticast)))) + if(unlikely(d->do_packets == CONFIG_BOOLEAN_AUTO && + (d->rpackets || d->tpackets || d->rmulticast || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_packets = CONFIG_BOOLEAN_YES; if(d->do_packets == CONFIG_BOOLEAN_YES) { @@ -716,7 +718,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_errors == CONFIG_BOOLEAN_AUTO && (d->rerrors || d->terrors)))) + if(unlikely(d->do_errors == CONFIG_BOOLEAN_AUTO && + (d->rerrors || d->terrors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_errors = CONFIG_BOOLEAN_YES; if(d->do_errors == CONFIG_BOOLEAN_YES) { @@ -759,7 +762,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_drops == CONFIG_BOOLEAN_AUTO && (d->rdrops || d->tdrops)))) + if(unlikely(d->do_drops == CONFIG_BOOLEAN_AUTO && + (d->rdrops || d->tdrops || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_drops = CONFIG_BOOLEAN_YES; if(d->do_drops == CONFIG_BOOLEAN_YES) { @@ -802,7 +806,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_fifo == CONFIG_BOOLEAN_AUTO && (d->rfifo || d->tfifo)))) + if(unlikely(d->do_fifo == CONFIG_BOOLEAN_AUTO && + (d->rfifo || d->tfifo || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_fifo = CONFIG_BOOLEAN_YES; if(d->do_fifo == CONFIG_BOOLEAN_YES) { @@ -845,7 +850,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_compressed == CONFIG_BOOLEAN_AUTO && (d->rcompressed || d->tcompressed)))) + if(unlikely(d->do_compressed == CONFIG_BOOLEAN_AUTO && + (d->rcompressed || d->tcompressed || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_compressed = CONFIG_BOOLEAN_YES; if(d->do_compressed == CONFIG_BOOLEAN_YES) { @@ -888,7 +894,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(unlikely((d->do_events == CONFIG_BOOLEAN_AUTO && (d->rframe || d->tcollisions || d->tcarrier)))) + if(unlikely(d->do_events == CONFIG_BOOLEAN_AUTO && + (d->rframe || d->tcollisions || d->tcarrier || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) d->do_events = CONFIG_BOOLEAN_YES; if(d->do_events == CONFIG_BOOLEAN_YES) { @@ -924,7 +931,9 @@ int do_proc_net_dev(int update_every, usec_t dt) { } } - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (system_rbytes || system_tbytes))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (system_rbytes || system_tbytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st_system_net = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c index 2dc3c59c..ab8206be 100644 --- a/collectors/proc.plugin/proc_net_netstat.c +++ b/collectors/proc.plugin/proc_net_netstat.c @@ -262,7 +262,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets || 
ipext_OutOctets))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (ipext_InOctets || + ipext_OutOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st_system_ip = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; @@ -297,7 +300,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) { + if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && + (ipext_InNoRoutes || + ipext_InTruncatedPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_inerrors = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_inerrors = NULL; static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL; @@ -336,7 +342,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) { + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && + (ipext_InMcastOctets || + ipext_OutMcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_mcast = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; @@ -373,7 +382,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) { + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && + (ipext_InBcastOctets || + ipext_OutBcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_bcast = NULL; @@ -411,7 +423,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) { + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && + (ipext_InMcastPkts || + ipext_OutMcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_mcastpkts = NULL; @@ -448,7 +463,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) { + if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && + (ipext_InBcastPkts || + ipext_OutBcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_bcastpkts = NULL; @@ -486,7 +504,12 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) { + if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && + (ipext_InCEPkts || + ipext_InECT0Pkts || + ipext_InECT1Pkts || + ipext_InNoECTPkts || + 
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ecn = CONFIG_BOOLEAN_YES; static RRDSET *st_ecnpkts = NULL; @@ -538,7 +561,9 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) { + if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPMemoryPressures || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_memory = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpmemorypressures = NULL; @@ -572,7 +597,14 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) { + if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPAbortOnData || + tcpext_TCPAbortOnClose || + tcpext_TCPAbortOnMemory || + tcpext_TCPAbortOnTimeout || + tcpext_TCPAbortOnLinger || + tcpext_TCPAbortFailed || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_connaborts = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpconnaborts = NULL; @@ -616,7 +648,12 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) { + if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPRenoReorder || + tcpext_TCPFACKReorder || + tcpext_TCPSACKReorder || + tcpext_TCPTSReorder || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_reorder = CONFIG_BOOLEAN_YES; static RRDSET *st_tcpreorders = NULL; @@ -656,7 +693,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) { + if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPOFOQueue || + tcpext_TCPOFODrop || + tcpext_TCPOFOMerge || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_ofo = CONFIG_BOOLEAN_YES; static RRDSET *st_ip_tcpofo = NULL; @@ -697,7 +738,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) { + if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && + (tcpext_SyncookiesSent || + tcpext_SyncookiesRecv || + tcpext_SyncookiesFailed || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syscookies = CONFIG_BOOLEAN_YES; static RRDSET *st_syncookies = NULL; @@ -736,7 +781,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && (tcpext_TCPReqQFullDrop || tcpext_TCPReqQFullDoCookies))) { + if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && + (tcpext_TCPReqQFullDrop || + tcpext_TCPReqQFullDoCookies || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_syn_queue = CONFIG_BOOLEAN_YES; static RRDSET *st_syn_queue = NULL; @@ -775,7 +823,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && (tcpext_ListenOverflows || tcpext_ListenDrops))) { + if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && + (tcpext_ListenOverflows || + tcpext_ListenDrops || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcpext_accept_queue = CONFIG_BOOLEAN_YES; static RRDSET *st_accept_queue = NULL; diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c index bd1062e9..343cc5af 100644 --- a/collectors/proc.plugin/proc_net_sctp_snmp.c +++ b/collectors/proc.plugin/proc_net_sctp_snmp.c @@ -124,7 +124,8 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && SctpCurrEstab)) { + if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && + (SctpCurrEstab || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_associations = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_established = NULL; @@ -155,7 +156,12 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && (SctpActiveEstabs || SctpPassiveEstabs || SctpAborteds || SctpShutdowns))) { + if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && + (SctpActiveEstabs || + SctpPassiveEstabs || + SctpAborteds || + SctpShutdowns || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_transitions = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_active = NULL, @@ -195,7 +201,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && (SctpInSCTPPacks || SctpOutSCTPPacks))) { + if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && + (SctpInSCTPPacks || + SctpOutSCTPPacks || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -230,7 +239,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO && (SctpOutOfBlues || SctpChecksumErrors))) { + if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO && + (SctpOutOfBlues || + SctpChecksumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_packet_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM 
*rd_invalid = NULL, @@ -265,7 +277,10 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && (SctpFragUsrMsgs || SctpReasmUsrMsgs))) { + if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && + (SctpFragUsrMsgs || + SctpReasmUsrMsgs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_fragmentation = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -300,8 +315,14 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO - && (SctpInCtrlChunks || SctpInOrderChunks || SctpInUnorderChunks || SctpOutCtrlChunks || SctpOutOrderChunks || SctpOutUnorderChunks))) { + if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO && + (SctpInCtrlChunks || + SctpInOrderChunks || + SctpInUnorderChunks || + SctpOutCtrlChunks || + SctpOutOrderChunks || + SctpOutUnorderChunks || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_chunk_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM diff --git a/collectors/proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c index ffd368f6..b03a6ac7 100644 --- a/collectors/proc.plugin/proc_net_snmp.c +++ b/collectors/proc.plugin/proc_net_snmp.c @@ -258,7 +258,12 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_OutRequests || snmp_root.ip_InReceives || snmp_root.ip_ForwDatagrams || snmp_root.ip_InDelivers))) { + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_OutRequests || + snmp_root.ip_InReceives || + snmp_root.ip_ForwDatagrams || + snmp_root.ip_InDelivers || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -299,7 +304,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_FragOKs || snmp_root.ip_FragFails || snmp_root.ip_FragCreates))) { + if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_FragOKs || + snmp_root.ip_FragFails || + snmp_root.ip_FragCreates || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -338,7 +347,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_ReasmOKs || snmp_root.ip_ReasmFails || snmp_root.ip_ReasmReqds))) { + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_ReasmOKs || + snmp_root.ip_ReasmFails || + snmp_root.ip_ReasmReqds || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -377,7 +390,14 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - 
if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_InDiscards || snmp_root.ip_OutDiscards || snmp_root.ip_InHdrErrors || snmp_root.ip_InAddrErrors || snmp_root.ip_InUnknownProtos || snmp_root.ip_OutNoRoutes))) { + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.ip_InDiscards || + snmp_root.ip_OutDiscards || + snmp_root.ip_InHdrErrors || + snmp_root.ip_InAddrErrors || + snmp_root.ip_InUnknownProtos || + snmp_root.ip_OutNoRoutes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -447,7 +467,13 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.icmp_InMsgs || snmp_root.icmp_OutMsgs || snmp_root.icmp_InErrors || snmp_root.icmp_OutErrors || snmp_root.icmp_InCsumErrors))) { + if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.icmp_InMsgs || + snmp_root.icmp_OutMsgs || + snmp_root.icmp_InErrors || + snmp_root.icmp_OutErrors || + snmp_root.icmp_InCsumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_packets = CONFIG_BOOLEAN_YES; { @@ -540,28 +566,28 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && ( - snmp_root.icmpmsg_InEchoReps - || snmp_root.icmpmsg_OutEchoReps - || snmp_root.icmpmsg_InDestUnreachs - || snmp_root.icmpmsg_OutDestUnreachs - || snmp_root.icmpmsg_InRedirects - || snmp_root.icmpmsg_OutRedirects - || snmp_root.icmpmsg_InEchos - || snmp_root.icmpmsg_OutEchos - || snmp_root.icmpmsg_InRouterAdvert - || snmp_root.icmpmsg_OutRouterAdvert - || snmp_root.icmpmsg_InRouterSelect - || snmp_root.icmpmsg_OutRouterSelect - || snmp_root.icmpmsg_InTimeExcds - || snmp_root.icmpmsg_OutTimeExcds - || snmp_root.icmpmsg_InParmProbs - || snmp_root.icmpmsg_OutParmProbs - || snmp_root.icmpmsg_InTimestamps - || snmp_root.icmpmsg_OutTimestamps - || snmp_root.icmpmsg_InTimestampReps - || snmp_root.icmpmsg_OutTimestampReps - ))) { + if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && + (snmp_root.icmpmsg_InEchoReps || + snmp_root.icmpmsg_OutEchoReps || + snmp_root.icmpmsg_InDestUnreachs || + snmp_root.icmpmsg_OutDestUnreachs || + snmp_root.icmpmsg_InRedirects || + snmp_root.icmpmsg_OutRedirects || + snmp_root.icmpmsg_InEchos || + snmp_root.icmpmsg_OutEchos || + snmp_root.icmpmsg_InRouterAdvert || + snmp_root.icmpmsg_OutRouterAdvert || + snmp_root.icmpmsg_InRouterSelect || + snmp_root.icmpmsg_OutRouterSelect || + snmp_root.icmpmsg_InTimeExcds || + snmp_root.icmpmsg_OutTimeExcds || + snmp_root.icmpmsg_InParmProbs || + snmp_root.icmpmsg_OutParmProbs || + snmp_root.icmpmsg_InTimestamps || + snmp_root.icmpmsg_OutTimestamps || + snmp_root.icmpmsg_InTimestampReps || + snmp_root.icmpmsg_OutTimestampReps || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmpmsg = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -677,7 +703,9 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && snmp_root.tcp_CurrEstab)) { 
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_CurrEstab || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -709,7 +737,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InSegs || snmp_root.tcp_OutSegs))) { + if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_InSegs || + snmp_root.tcp_OutSegs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -744,7 +775,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InErrs || snmp_root.tcp_InCsumErrors || snmp_root.tcp_RetransSegs))) { + if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_InErrs || + snmp_root.tcp_InCsumErrors || + snmp_root.tcp_RetransSegs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -783,7 +818,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_ActiveOpens || snmp_root.tcp_PassiveOpens))) { + if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_ActiveOpens || + snmp_root.tcp_PassiveOpens || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_opens = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -819,7 +857,11 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_EstabResets || snmp_root.tcp_OutRsts || snmp_root.tcp_AttemptFails))) { + if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && + (snmp_root.tcp_EstabResets || + snmp_root.tcp_OutRsts || + snmp_root.tcp_AttemptFails || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_handshake = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -882,7 +924,10 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- // see http://net-snmp.sourceforge.net/docs/mibs/udp.html - if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.udp_InDatagrams || snmp_root.udp_OutDatagrams))) { + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.udp_InDatagrams || + snmp_root.udp_OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -917,14 +962,14 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && ( - snmp_root.udp_InErrors - || snmp_root.udp_NoPorts - || snmp_root.udp_RcvbufErrors - || snmp_root.udp_SndbufErrors - 
|| snmp_root.udp_InCsumErrors - || snmp_root.udp_IgnoredMulti - ))) { + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && + (snmp_root.udp_InErrors || + snmp_root.udp_NoPorts || + snmp_root.udp_RcvbufErrors || + snmp_root.udp_SndbufErrors || + snmp_root.udp_InCsumErrors || + snmp_root.udp_IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -992,16 +1037,16 @@ int do_proc_net_snmp(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && ( - snmp_root.udplite_InDatagrams - || snmp_root.udplite_OutDatagrams - || snmp_root.udplite_NoPorts - || snmp_root.udplite_InErrors - || snmp_root.udplite_InCsumErrors - || snmp_root.udplite_RcvbufErrors - || snmp_root.udplite_SndbufErrors - || snmp_root.udplite_IgnoredMulti - ))) { + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && + (snmp_root.udplite_InDatagrams || + snmp_root.udplite_OutDatagrams || + snmp_root.udplite_NoPorts || + snmp_root.udplite_InErrors || + snmp_root.udplite_InCsumErrors || + snmp_root.udplite_RcvbufErrors || + snmp_root.udplite_SndbufErrors || + snmp_root.udplite_IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_packets = CONFIG_BOOLEAN_YES; { diff --git a/collectors/proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c index f0084aa2..445e0dca 100644 --- a/collectors/proc.plugin/proc_net_snmp6.c +++ b/collectors/proc.plugin/proc_net_snmp6.c @@ -277,7 +277,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (Ip6InOctets || Ip6OutOctets))) { + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && + (Ip6InOctets || + Ip6OutOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bandwidth = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -311,7 +314,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (Ip6InReceives || Ip6OutRequests || Ip6InDelivers || Ip6OutForwDatagrams))) { + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && + (Ip6InReceives || + Ip6OutRequests || + Ip6InDelivers || + Ip6OutForwDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -351,7 +359,11 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (Ip6FragOKs || Ip6FragFails || Ip6FragCreates))) { + if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && + (Ip6FragOKs || + Ip6FragFails || + Ip6FragCreates || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsout = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_ok = NULL, @@ -389,13 +401,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // 
-------------------------------------------------------------------- - if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO - && ( - Ip6ReasmOKs - || Ip6ReasmFails - || Ip6ReasmTimeout - || Ip6ReasmReqds - ))) { + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && + (Ip6ReasmOKs || + Ip6ReasmFails || + Ip6ReasmTimeout || + Ip6ReasmReqds || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_fragsin = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -436,17 +447,16 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO - && ( - Ip6InDiscards - || Ip6OutDiscards - || Ip6InHdrErrors - || Ip6InAddrErrors - || Ip6InUnknownProtos - || Ip6InTooBigErrors - || Ip6InTruncatedPkts - || Ip6InNoRoutes - ))) { + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && + (Ip6InDiscards || + Ip6OutDiscards || + Ip6InHdrErrors || + Ip6InAddrErrors || + Ip6InUnknownProtos || + Ip6InTooBigErrors || + Ip6InTruncatedPkts || + Ip6InNoRoutes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ip_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InDiscards = NULL, @@ -502,7 +512,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (Udp6InDatagrams || Udp6OutDatagrams))) { + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && + (Udp6InDatagrams || + Udp6OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -536,15 +549,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO - && ( - Udp6InErrors - || Udp6NoPorts - || Udp6RcvbufErrors - || Udp6SndbufErrors - || Udp6InCsumErrors - || Udp6IgnoredMulti - ))) { + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && + (Udp6InErrors || + Udp6NoPorts || + Udp6RcvbufErrors || + Udp6SndbufErrors || + Udp6InCsumErrors || + Udp6IgnoredMulti || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_RcvbufErrors = NULL, @@ -591,7 +603,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (UdpLite6InDatagrams || UdpLite6OutDatagrams))) { + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && + (UdpLite6InDatagrams || + UdpLite6OutDatagrams || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_packets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_received = NULL, @@ -625,15 +640,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO - && ( - UdpLite6InErrors - || UdpLite6NoPorts - || UdpLite6RcvbufErrors - || 
UdpLite6SndbufErrors - || Udp6InCsumErrors - || UdpLite6InCsumErrors - ))) { + if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO && + (UdpLite6InErrors || + UdpLite6NoPorts || + UdpLite6RcvbufErrors || + UdpLite6SndbufErrors || + Udp6InCsumErrors || + UdpLite6InCsumErrors || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_RcvbufErrors = NULL, @@ -677,7 +691,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastOctets || Ip6InMcastOctets))) { + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && + (Ip6OutMcastOctets || + Ip6InMcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InMcastOctets = NULL, @@ -712,7 +729,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (Ip6OutBcastOctets || Ip6InBcastOctets))) { + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && + (Ip6OutBcastOctets || + Ip6InBcastOctets || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_bcast = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InBcastOctets = NULL, @@ -747,7 +767,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastPkts || Ip6InMcastPkts))) { + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && + (Ip6OutMcastPkts || + Ip6InMcastPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_mcast_p = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Ip6InMcastPkts = NULL, @@ -782,7 +805,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && (Icmp6InMsgs || Icmp6OutMsgs))) { + if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && + (Icmp6InMsgs || + Icmp6OutMsgs || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Icmp6InMsgs = NULL, @@ -816,7 +842,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && (Icmp6InRedirects || Icmp6OutRedirects))) { + if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && + (Icmp6InRedirects || + Icmp6OutRedirects || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_redir = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_Icmp6InRedirects = NULL, @@ -850,20 +879,19 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InErrors - || Icmp6OutErrors - || Icmp6InCsumErrors - || Icmp6InDestUnreachs - || Icmp6InPktTooBigs - || 
Icmp6InTimeExcds - || Icmp6InParmProblems - || Icmp6OutDestUnreachs - || Icmp6OutPktTooBigs - || Icmp6OutTimeExcds - || Icmp6OutParmProblems - ))) { + if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO && + (Icmp6InErrors || + Icmp6OutErrors || + Icmp6InCsumErrors || + Icmp6InDestUnreachs || + Icmp6InPktTooBigs || + Icmp6InTimeExcds || + Icmp6InParmProblems || + Icmp6OutDestUnreachs || + Icmp6OutPktTooBigs || + Icmp6OutTimeExcds || + Icmp6OutParmProblems || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_errors = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InErrors = NULL, @@ -924,13 +952,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InEchos - || Icmp6OutEchos - || Icmp6InEchoReplies - || Icmp6OutEchoReplies - ))) { + if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO && + (Icmp6InEchos || + Icmp6OutEchos || + Icmp6InEchoReplies || + Icmp6OutEchoReplies || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_echos = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InEchos = NULL, @@ -970,15 +997,14 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InGroupMembQueries - || Icmp6OutGroupMembQueries - || Icmp6InGroupMembResponses - || Icmp6OutGroupMembResponses - || Icmp6InGroupMembReductions - || Icmp6OutGroupMembReductions - ))) { + if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO && + (Icmp6InGroupMembQueries || + Icmp6OutGroupMembQueries || + Icmp6InGroupMembResponses || + Icmp6OutGroupMembResponses || + Icmp6InGroupMembReductions || + Icmp6OutGroupMembReductions || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_groupmemb = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InQueries = NULL, @@ -1023,13 +1049,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InRouterSolicits - || Icmp6OutRouterSolicits - || Icmp6InRouterAdvertisements - || Icmp6OutRouterAdvertisements - ))) { + if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO && + (Icmp6InRouterSolicits || + Icmp6OutRouterSolicits || + Icmp6InRouterAdvertisements || + Icmp6OutRouterAdvertisements || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_router = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InSolicits = NULL, @@ -1069,13 +1094,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InNeighborSolicits - || Icmp6OutNeighborSolicits - || Icmp6InNeighborAdvertisements - || Icmp6OutNeighborAdvertisements - ))) { + if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO && + (Icmp6InNeighborSolicits || + Icmp6OutNeighborSolicits || + Icmp6InNeighborAdvertisements || + Icmp6OutNeighborAdvertisements || + netdata_zero_metrics_enabled == 
CONFIG_BOOLEAN_YES))) { do_icmp_neighbor = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InSolicits = NULL, @@ -1115,7 +1139,10 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && (Icmp6InMLDv2Reports || Icmp6OutMLDv2Reports))) { + if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && + (Icmp6InMLDv2Reports || + Icmp6OutMLDv2Reports || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_mldv2 = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InMLDv2Reports = NULL, @@ -1149,19 +1176,18 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InType1 - || Icmp6InType128 - || Icmp6InType129 - || Icmp6InType136 - || Icmp6OutType1 - || Icmp6OutType128 - || Icmp6OutType129 - || Icmp6OutType133 - || Icmp6OutType135 - || Icmp6OutType143 - ))) { + if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO && + (Icmp6InType1 || + Icmp6InType128 || + Icmp6InType129 || + Icmp6InType136 || + Icmp6OutType1 || + Icmp6OutType128 || + Icmp6OutType129 || + Icmp6OutType133 || + Icmp6OutType135 || + Icmp6OutType143 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_icmp_types = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InType1 = NULL, @@ -1219,13 +1245,12 @@ int do_proc_net_snmp6(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO - && ( - Ip6InNoECTPkts - || Ip6InECT1Pkts - || Ip6InECT0Pkts - || Ip6InCEPkts - ))) { + if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO && + (Ip6InNoECTPkts || + Ip6InECT1Pkts || + Ip6InECT0Pkts || + Ip6InCEPkts || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ect = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; static RRDDIM *rd_InNoECTPkts = NULL, diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c index ff9cc523..994cbad7 100644 --- a/collectors/proc.plugin/proc_net_sockstat.c +++ b/collectors/proc.plugin/proc_net_sockstat.c @@ -218,7 +218,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.sockets_used)) { + if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.sockets_used || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -250,7 +252,12 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat_root.tcp_inuse || sockstat_root.tcp_orphan || sockstat_root.tcp_tw || sockstat_root.tcp_alloc))) { + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.tcp_inuse || + sockstat_root.tcp_orphan || + sockstat_root.tcp_tw || + sockstat_root.tcp_alloc || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = 
CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -291,7 +298,8 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.tcp_mem)) { + if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.tcp_mem || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -323,7 +331,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_inuse)) { + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udp_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -355,7 +365,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_mem)) { + if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udp_mem || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -387,7 +399,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udplite_inuse)) { + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.udplite_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -419,7 +433,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.raw_inuse)) { + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.raw_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_raw_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -451,7 +467,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_inuse)) { + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat_root.frag_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -483,7 +501,9 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_memory)) { + if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && + (sockstat_root.frag_memory || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_mem = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; 
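Every hunk in this collectors batch applies the same guard: a chart whose configuration is `auto` is now enabled not only when at least one of its source metrics is non-zero, but also when the new global flag `netdata_zero_metrics_enabled` is set. A minimal, self-contained sketch of that guard follows; the numeric values of the `CONFIG_BOOLEAN_*` constants and the simplified metric sum are assumptions for illustration, not the daemon's actual definitions:

```c
#include <stdio.h>

/* Assumed values for the sketch; the real constants live in libnetdata. */
#define CONFIG_BOOLEAN_NO   0
#define CONFIG_BOOLEAN_YES  1
#define CONFIG_BOOLEAN_AUTO 2

static int netdata_zero_metrics_enabled = CONFIG_BOOLEAN_NO;

/* Returns non-zero when an auto/yes chart should be created and updated. */
static int chart_enabled(int do_chart, unsigned long long metric_sum) {
    return do_chart == CONFIG_BOOLEAN_YES ||
           (do_chart == CONFIG_BOOLEAN_AUTO &&
            (metric_sum ||
             netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES));
}

int main(void) {
    int do_sockets = CONFIG_BOOLEAN_AUTO;

    /* auto + all-zero metrics: the chart stays hidden */
    printf("zero metrics off: %d\n", chart_enabled(do_sockets, 0));

    /* setting "enable zero metrics = yes" in [global] flips the flag */
    netdata_zero_metrics_enabled = CONFIG_BOOLEAN_YES;
    printf("zero metrics on:  %d\n", chart_enabled(do_sockets, 0));
    return 0;
}
```

Note that the real collectors also latch the `do_*` variable to `CONFIG_BOOLEAN_YES` once the condition first holds, so a chart that has appeared never disappears again within the same session.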
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c index 687b9bde..ce8c9e09 100644 --- a/collectors/proc.plugin/proc_net_sockstat6.c +++ b/collectors/proc.plugin/proc_net_sockstat6.c @@ -111,7 +111,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat6_root.tcp6_inuse))) { + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.tcp6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_tcp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -143,7 +145,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udp6_inuse)) { + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.udp6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udp_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -175,7 +179,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udplite6_inuse)) { + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.udplite6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_udplite_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -207,7 +213,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.raw6_inuse)) { + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.raw6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_raw_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -239,7 +247,9 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { // ------------------------------------------------------------------------ - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.frag6_inuse)) { + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && + (sockstat6_root.frag6_inuse || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_frag_sockets = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c index 312ded5b..f5030f99 100644 --- a/collectors/proc.plugin/proc_net_stat_synproxy.c +++ b/collectors/proc.plugin/proc_net_stat_synproxy.c @@ -59,7 +59,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_entries == CONFIG_BOOLEAN_AUTO && events) || do_entries == CONFIG_BOOLEAN_YES) { + if(do_entries == CONFIG_BOOLEAN_YES || (do_entries == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_entries = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -89,7 +90,8 @@ int 
do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_syns == CONFIG_BOOLEAN_AUTO && events) || do_syns == CONFIG_BOOLEAN_YES) { + if(do_syns == CONFIG_BOOLEAN_YES || (do_syns == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_syns = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -119,7 +121,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_reopened == CONFIG_BOOLEAN_AUTO && events) || do_reopened == CONFIG_BOOLEAN_YES) { + if(do_reopened == CONFIG_BOOLEAN_YES || (do_reopened == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_reopened = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; @@ -149,7 +152,8 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if((do_cookies == CONFIG_BOOLEAN_AUTO && events) || do_cookies == CONFIG_BOOLEAN_YES) { + if(do_cookies == CONFIG_BOOLEAN_YES || (do_cookies == CONFIG_BOOLEAN_AUTO && + (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_cookies = CONFIG_BOOLEAN_YES; static RRDSET *st = NULL; diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c index c6557289..32ff36b7 100644 --- a/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -124,6 +124,8 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename); show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO); + if(show_zero_charts == CONFIG_BOOLEAN_AUTO && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES) + show_zero_charts = CONFIG_BOOLEAN_YES; if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES)) do_zfs_stats = 1; } diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c index a9712b24..7def02dd 100644 --- a/collectors/proc.plugin/proc_vmstat.c +++ b/collectors/proc.plugin/proc_vmstat.c @@ -43,7 +43,9 @@ int do_proc_vmstat(int update_every, usec_t dt) { arl_expect(arl_base, "pswpin", &pswpin); arl_expect(arl_base, "pswpout", &pswpout); - if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && get_numa_node_count() >= 2)) { + if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && + (get_numa_node_count() >= 2 || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { arl_expect(arl_base, "numa_foreign", &numa_foreign); arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local); arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults); @@ -91,7 +93,9 @@ int do_proc_vmstat(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(pswpin || pswpout || do_swapio == CONFIG_BOOLEAN_YES) { + if(do_swapio == CONFIG_BOOLEAN_YES || (do_swapio == CONFIG_BOOLEAN_AUTO && + (pswpin || pswpout || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_swapio = CONFIG_BOOLEAN_YES; static RRDSET *st_swapio = NULL; diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c index 03cbfff8..b1114837 100644 --- a/collectors/proc.plugin/sys_devices_system_edac_mc.c +++ 
b/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -128,7 +128,8 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && ce_sum > 0)) { + if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && + (ce_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ce = CONFIG_BOOLEAN_YES; static RRDSET *ce_st = NULL; @@ -166,7 +167,8 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { // -------------------------------------------------------------------- - if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && ue_sum > 0)) { + if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && + (ue_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_ue = CONFIG_BOOLEAN_YES; static RRDSET *ue_st = NULL; diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c index 6e6d0acc..ff408ed8 100644 --- a/collectors/proc.plugin/sys_devices_system_node.c +++ b/collectors/proc.plugin/sys_devices_system_node.c @@ -83,7 +83,8 @@ int do_proc_sys_devices_system_node(int update_every, usec_t dt) { hash_numa_miss = simple_hash("numa_miss"); } - if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { + if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && + (numa_node_count >= 2 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { for(m = numa_root; m; m = m->next) { if(m->numastat_filename) { diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c index cb25ad44..4e58a1a4 100644 --- a/collectors/proc.plugin/sys_fs_btrfs.c +++ b/collectors/proc.plugin/sys_fs_btrfs.c @@ -542,7 +542,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/disks - if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && node->all_disks_total && node->allocation_data_disk_total)) { + if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && + ((node->all_disks_total && node->allocation_data_disk_total) || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_disks = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_disks)) { @@ -598,7 +600,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/data - if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && node->allocation_data_total_bytes)) { + if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && + (node->allocation_data_total_bytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_data = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_data)) { @@ -639,7 +643,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/metadata - if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && node->allocation_metadata_total_bytes)) { + if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && + (node->allocation_metadata_total_bytes || + 
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_metadata = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_metadata)) { @@ -682,7 +688,9 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { // -------------------------------------------------------------------- // allocation/system - if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && node->allocation_system_total_bytes)) { + if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && + (node->allocation_system_total_bytes || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { do_allocation_system = CONFIG_BOOLEAN_YES; if(unlikely(!node->st_allocation_system)) { diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c index 0b64987c..0a93f54e 100644 --- a/collectors/proc.plugin/sys_kernel_mm_ksm.c +++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c @@ -89,7 +89,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { offered = pages_sharing + pages_shared + pages_unshared + pages_volatile; saved = pages_sharing; - if(unlikely(!offered /*|| !pages_to_scan*/)) return 0; + if(unlikely(!offered /*|| !pages_to_scan*/ && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_NO)) return 0; // -------------------------------------------------------------------- @@ -192,7 +192,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { else rrdset_next(st_mem_ksm_ratios); - rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered); + rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, offered ? (saved * 1000000) / offered : 0); rrdset_done(st_mem_ksm_ratios); } diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf index 53858ae2..2dded40a 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.conf +++ b/collectors/python.d.plugin/mongodb/mongodb.conf @@ -84,9 +84,9 @@ local: port : 27017 # authsample: -# name : 'secure' -# host : 'mongodb.example.com' -# port : 27017 +# name : 'secure' +# host : 'mongodb.example.com' +# port : 27017 # authdb : 'admin' -# user : 'monitor' -# password : 'supersecret' +# user : 'monitor' +# pass : 'supersecret' diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py index 27519b76..3b94fcdf 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py @@ -4,6 +4,7 @@ # Author: Ilya Mashchenko (ilyam8) # SPDX-License-Identifier: GPL-3.0-or-later +import errno import socket try: @@ -181,7 +182,8 @@ class SocketService(SimpleService): self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all self._sock.close() except Exception as error: - self.error(error) + if not (hasattr(error, 'errno') and error.errno == errno.ENOTCONN): + self.error(error) self._sock = None def _send(self, request=None): diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py index ee2fb68b..80cc1cf1 100644 --- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py @@ -131,6 +131,15 @@ class Server: return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d) +# https://pypi.org/project/rethinkdb/2.4.0/ +# rdb.RethinkDB() can be used 
as rdb drop in replacement. +# https://github.com/rethinkdb/rethinkdb-python#quickstart +def get_rethinkdb(): + if hasattr(rdb, 'RethinkDB'): + return rdb.RethinkDB() + return rdb + + class Service(SimpleService): def __init__(self, configuration=None, name=None): SimpleService.__init__(self, configuration=configuration, name=name) @@ -141,6 +150,7 @@ class Service(SimpleService): self.user = self.configuration.get('user', 'admin') self.password = self.configuration.get('password') self.timeout = self.configuration.get('timeout', 2) + self.rdb = None self.conn = None self.alive = True @@ -149,6 +159,9 @@ class Service(SimpleService): self.error('"rethinkdb" module is needed to use rethinkdbs.py') return False + self.debug("rethinkdb driver version {0}".format(rdb.__version__)) + self.rdb = get_rethinkdb() + if not self.connect(): return None @@ -196,14 +209,14 @@ class Service(SimpleService): def get_stats(self): try: - return list(rdb.db('rethinkdb').table('stats').run(self.conn).items) + return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items) except rdb.errors.ReqlError: self.alive = False return None def connect(self): try: - self.conn = rdb.connect( + self.conn = self.rdb.connect( host=self.host, port=self.port, user=self.user, diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py index 02e88e6a..6b54ea60 100644 --- a/collectors/python.d.plugin/sensors/sensors.chart.py +++ b/collectors/python.d.plugin/sensors/sensors.chart.py @@ -92,7 +92,7 @@ class Service(SimpleService): SimpleService.__init__(self, configuration=configuration, name=name) self.order = list() self.definitions = dict() - self.chips = list() + self.chips = configuration.get('chips') def get_data(self): data = dict() diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py index 12f756c5..f121ab2e 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -558,6 +558,7 @@ class DiskLogFile: class BaseDisk: def __init__(self, name, log_file): + self.raw_name = name self.name = re.sub(r'_+', '_', name) self.log_file = log_file self.attrs = list() @@ -566,8 +567,8 @@ class BaseDisk: def __eq__(self, other): if isinstance(other, BaseDisk): - return self.name == other.name - return self.name == other + return self.raw_name == other.raw_name + return self.raw_name == other def __ne__(self, other): return not self == other @@ -657,7 +658,7 @@ class Service(SimpleService): not disk.log_file.is_active(current_time, self.age), ] ): - self.disks.remove(disk.name) + self.disks.remove(disk.raw_name) self.remove_disk_from_charts(disk) def scan(self): @@ -681,9 +682,11 @@ class Service(SimpleService): path = os.path.join(self.log_path, full_name) if name in self.disks: + self.debug('skipping {0}: already in disks'.format(full_name)) return None if [p for p in self.exclude if p in name]: + self.debug('skipping {0}: filtered by `exclude` option'.format(full_name)) return None if not os.access(path, os.R_OK): @@ -747,5 +750,4 @@ class Service(SimpleService): if not chart_id or chart_id not in self.charts: continue - # TODO: can't delete dimension - self.charts[chart_id].hide_dimension('{0}_{1}'.format(disk.name, attr.name)) + self.charts[chart_id].del_dimension('{0}_{1}'.format(disk.name, attr.name)) diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py 
b/collectors/python.d.plugin/unbound/unbound.chart.py index dade2b20..6e5a22c5 100644 --- a/collectors/python.d.plugin/unbound/unbound.chart.py +++ b/collectors/python.d.plugin/unbound/unbound.chart.py @@ -253,15 +253,19 @@ class Service(SocketService): else: self.request = b'UBCT1 status\n' raw = self._get_raw_data() - for line in raw.splitlines(): - if line.startswith('threads'): - self.threads = int(line.split()[1]) - self._generate_perthread_charts() - break - if self.threads is None: - self.info('Unable to auto-detect thread counts, disabling per-thread stats.') - self.perthread = False - self.request = tmp + if raw is None: + result = False + self.warning('Received no data from socket.') + else: + for line in raw.splitlines(): + if line.startswith('threads'): + self.threads = int(line.split()[1]) + self._generate_perthread_charts() + break + if self.threads is None: + self.info('Unable to auto-detect thread counts, disabling per-thread stats.') + self.perthread = False + self.request = tmp return result @staticmethod @@ -274,10 +278,13 @@ class Service(SocketService): raw = self._get_raw_data() data = dict() tmp = dict() - for line in raw.splitlines(): - stat = line.split('=') - tmp[stat[0]] = stat[1] - for item in self.statmap: - if item in tmp: - data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1] + if raw is not None: + for line in raw.splitlines(): + stat = line.split('=') + tmp[stat[0]] = stat[1] + for item in self.statmap: + if item in tmp: + data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1] + else: + self.warning('Received no data from socket.') return data diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index 534466a0..78f0e980 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -1067,7 +1067,7 @@ static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) { } static STATSD_APP_CHART_DIM *add_dimension_to_app_chart( - STATSD_APP *app + STATSD_APP *app __maybe_unused , STATSD_APP_CHART *chart , const char *metric_name , const char *dim_name diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md index e71944e3..4133b4f8 100644 --- a/collectors/tc.plugin/README.md +++ b/collectors/tc.plugin/README.md @@ -191,7 +191,7 @@ Add the following configuration option in `/etc/netdata.conf`: Finally, create `/etc/netdata/tc-qos-helper.conf` with this content: ```tc_show="class"``` -Please note, that by default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. +Please note that, by default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be added to the dashboard automatically (a refresh of the dashboard is needed for them to appear, though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
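The `enable zero metrics` option mentioned above is read once at daemon startup via `config_get_boolean_ondemand()` (see the daemon/main.c hunk later in this diff). A minimal netdata.conf fragment that would turn zero-valued charts on for all internal plugins; the section and option names come from the README text above, and everything else is left at its defaults:

```
[global]
    enable zero metrics = yes
```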
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c index 1a0ced9a..50383f4c 100644 --- a/collectors/tc.plugin/plugin_tc.c +++ b/collectors/tc.plugin/plugin_tc.c @@ -382,7 +382,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // bytes - if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && bytes_sum)) { + if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && + (bytes_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_bytes = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_bytes)) @@ -425,7 +427,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // packets - if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && packets_sum)) { + if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && + (packets_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_packets = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_packets)) { @@ -478,7 +482,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // dropped - if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && dropped_sum)) { + if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && + (dropped_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_dropped = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_dropped)) { @@ -531,7 +537,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // tokens - if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && tokens_sum)) { + if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && + (tokens_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_tokens = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_tokens)) { @@ -585,7 +593,9 @@ static inline void tc_device_commit(struct tc_device *d) { // -------------------------------------------------------------------- // ctokens - if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && ctokens_sum)) { + if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && + (ctokens_sum || + netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { d->enabled_ctokens = CONFIG_BOOLEAN_YES; if(unlikely(!d->st_ctokens)) { @@ -874,9 +884,6 @@ void *tc_main(void *ptr) { uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP"); uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME"); uint32_t WORKTIME_HASH = simple_hash("WORKTIME"); -#ifdef DETACH_PLUGINS_FROM_NETDATA - uint32_t MYPID_HASH = simple_hash("MYPID"); -#endif uint32_t first_hash; snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir); @@ -1119,17 +1126,6 @@ void *tc_main(void *ptr) { rrdset_done(sttime); } -#ifdef 
DETACH_PLUGINS_FROM_NETDATA - else if(unlikely(first_hash == MYPID_HASH && (strcmp(words[0], "MYPID") == 0))) { - // debug(D_TC_LOOP, "MYPID line '%s'", words[1]); - char *id = words[1]; - pid_t pid = atol(id); - - if(likely(pid)) tc_child_pid = pid; - - debug(D_TC_LOOP, "TC: Child PID is %d.", tc_child_pid); - } -#endif //else { // debug(D_TC_LOOP, "IGNORED line"); //} diff --git a/configs.signatures b/configs.signatures index b0ded05e..e29883ac 100644 --- a/configs.signatures +++ b/configs.signatures @@ -381,7 +381,7 @@ declare -A configs_signatures=( ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf' ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf' ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf' - ['ce285c90747428ee5da4efb547418dda']='health.d/dbengine.conf' + ['93674f3206872ae9c43ecbc54988413b']='health.d/dbengine.conf' ['7fb8184d56a27040e73261ed9c6fc76f']='health_alarm_notify.conf' ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf' ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf' diff --git a/configure.ac b/configure.ac index b922ad5b..c65e406f 100644 --- a/configure.ac +++ b/configure.ac @@ -437,6 +437,14 @@ else AC_DEFINE_UNQUOTED([unlikely(x)], [(x)], [gcc branch optimization]) fi +if test "${GCC}" = "yes"; then + AC_DEFINE([__always_unused], [__attribute__((unused))], [gcc unused attribute]) + AC_DEFINE([__maybe_unused], [__attribute__((unused))], [gcc unused attribute]) +else + AC_DEFINE([__always_unused], [], [dummy unused attribute]) + AC_DEFINE([__maybe_unused], [], [dummy unused attribute]) +fi + if test "${enable_pedantic}" = "yes"; then enable_strict="yes" CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long" @@ -816,17 +824,17 @@ if test "${have_libaws_cpp_sdk_core}" = "yes" -a "${have_libcrypto}" = "yes" -a CXXFLAGS="${CXXFLAGS} -std=c++11" AC_TRY_LINK( - [ - #include <aws/core/Aws.h> - #include <aws/core/client/ClientConfiguration.h> - #include <aws/core/auth/AWSCredentials.h> - #include <aws/core/utils/Outcome.h> - #include <aws/kinesis/KinesisClient.h> - #include <aws/kinesis/model/PutRecordRequest.h> - ], - [Aws::Kinesis::Model::PutRecordRequest request;], - [have_libaws_cpp_sdk_kinesis=yes], - [have_libaws_cpp_sdk_kinesis=no] + [ + #include <aws/core/Aws.h> + #include <aws/core/client/ClientConfiguration.h> + #include <aws/core/auth/AWSCredentials.h> + #include <aws/core/utils/Outcome.h> + #include <aws/kinesis/KinesisClient.h> + #include <aws/kinesis/model/PutRecordRequest.h> + ], + [Aws::Kinesis::Model::PutRecordRequest request;], + [have_libaws_cpp_sdk_kinesis=yes], + [have_libaws_cpp_sdk_kinesis=no] ) LIBS="${save_LIBS}" @@ -879,12 +887,40 @@ PKG_CHECK_MODULES( [have_libprotobuf=no] ) -PKG_CHECK_MODULES( - [SNAPPY], - [snappy], - [have_libsnappy=yes], - [have_libsnappy=no] -) +AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy]) + + AC_LANG_SAVE + AC_LANG_CPLUSPLUS + save_LIBS="${LIBS}" + LIBS="-lsnappy" + save_CXXFLAGS="${CXXFLAGS}" + CXXFLAGS="${CXXFLAGS} -std=c++11" + + AC_TRY_LINK( + [ + #include <stdlib.h> + #include <snappy.h> + ], + [ + const char *input = "test"; + size_t compressed_length; + char *buffer = (char *)malloc(5 * sizeof(char)); + snappy::RawCompress(input, 4, buffer, &compressed_length); + free(buffer); + ], + [ + have_libsnappy=yes + SNAPPY_CFLAGS="" + SNAPPY_LIBS="-lsnappy" + ], + [have_libsnappy=no] + ) + + LIBS="${save_LIBS}" + CXXFLAGS="${save_CXXFLAGS}" + AC_LANG_RESTORE + +AC_MSG_RESULT([${have_libsnappy}]) AC_PATH_PROG([PROTOC], [protoc], [no]) AS_IF( @@ -919,7 +955,7 @@ if test 
"${enable_backend_prometeus_remote_write}" != "no" -a "${have_libprotobu AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability]) OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS}" CXX11FLAG="-std=c++11" - OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS} " + OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS}" else enable_backend_prometheus_remote_write="no" fi diff --git a/contrib/debian/changelog b/contrib/debian/changelog new file mode 100644 index 00000000..d9cf8bdb --- /dev/null +++ b/contrib/debian/changelog @@ -0,0 +1,5 @@ +netdata (PREVIOUS_PACKAGE_VERSION) unstable; urgency=medium + + * Initial Release + +-- Netdata Builder <bot@netdata.cloud> PREVIOUS_PACKAGE_DATE diff --git a/contrib/debian/control b/contrib/debian/control index 0f4f1bc0..ed5bd94b 100644 --- a/contrib/debian/control +++ b/contrib/debian/control @@ -4,18 +4,51 @@ Build-Depends: debhelper (>= 9), dh-systemd (>= 1.5), dpkg-dev (>= 1.13.19), zlib1g-dev, - uuid-dev + uuid-dev, + libuv1-dev, + liblz4-dev, + libjudy-dev, + libssl-dev, + libmnl-dev, + libjson-c-dev, + libcups2-dev, + libipmimonitoring-dev, + libnetfilter-acct-dev, + libsnappy-dev, + libprotobuf-dev, + libprotoc-dev, + autogen, + autoconf, + automake, + pkg-config, + curl, + gcc, + g++ Section: net Priority: optional -Maintainer: Costa Tsaousis <costa@tsaousis.gr> +Maintainer: Netdata Builder <bot@netdata.cloud> Standards-Version: 3.9.6 -Homepage: https://github.com/netdata/netdata/wiki +Homepage: https://netdata.cloud Package: netdata Architecture: any Depends: adduser, libcap2-bin (>= 1:2.0), lsb-base (>= 3.1-23.2), + zlib1g, + libuuid1, + libuv1, + liblz4-1, + libjudydebian1, + openssl, + libmnl0, + libjson-c3, + cups, + freeipmi, + libnetfilter-acct1, + libprotobuf-c1, + libsnappy1v5, + libprotoc10, ${misc:Depends}, ${shlibs:Depends} Description: real-time charts for system monitoring diff --git a/contrib/debian/control.jessie b/contrib/debian/control.jessie new file mode 100644 index 00000000..ced85d20 --- /dev/null +++ b/contrib/debian/control.jessie @@ -0,0 +1,56 @@ +Source: netdata +Build-Depends: debhelper (>= 9), + dh-autoreconf, + dh-systemd (>= 1.5), + dpkg-dev (>= 1.13.19), + zlib1g-dev, + uuid-dev, + liblz4-dev, + libjudy-dev, + libssl-dev, + libmnl-dev, + libjson-c-dev, + libcups2-dev, + libipmimonitoring-dev, + libnetfilter-acct-dev, + libsnappy-dev, + libprotobuf-dev, + libprotoc-dev, + autogen, + autoconf, + automake, + pkg-config, + curl, + gcc, + g++ +Section: net +Priority: optional +Maintainer: Costa Tsaousis <costa@tsaousis.gr> +Standards-Version: 3.9.6 +Homepage: https://github.com/netdata/netdata/wiki + +Package: netdata +Architecture: any +Depends: adduser, + libcap2-bin (>= 1:2.0), + lsb-base (>= 3.1-23.2), + zlib1g, + libuuid1, + liblz4-1, + libjudydebian1, + openssl, + libmnl0, + libjson-c3, + cups, + freeipmi, + libnetfilter-acct1, + libprotobuf-c1, + libsnappy1v5, + libprotoc10, + ${misc:Depends}, + ${shlibs:Depends} +Description: real-time charts for system monitoring + Netdata is a daemon that collects data in realtime (per second) + and presents a web site to view and analyze them. The presentation + is also real-time and full of interactive charts that precisely + render all collected values. 
diff --git a/contrib/debian/install_go.sh b/contrib/debian/install_go.sh new file mode 100755 index 00000000..17a3b409 --- /dev/null +++ b/contrib/debian/install_go.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +LIB_DIR="$1" +LIBEXEC_DIR="$2" + +# ############################################################ +# Package Go within netdata (TBD: Package it separately) +safe_sha256sum() { + # Within the context of the installer, we only use the -c option, which is common between the two commands + # We will have to reconsider if we start using non-common options + if command -v sha256sum >/dev/null 2>&1; then + sha256sum $@ + elif command -v shasum >/dev/null 2>&1; then + shasum -a 256 $@ + else + fatal "I could not find a suitable checksum binary to use" + fi +} + +download_go() { + url="${1}" + dest="${2}" + + if command -v curl >/dev/null 2>&1; then + curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}" + elif command -v wget >/dev/null 2>&1; then + wget -T 15 -O - "${url}" > "${dest}" + else + echo >&2 + echo >&2 "Downloading go.d plugin from '${url}' failed because of missing mandatory packages." + echo >&2 "Either add packages or disable it by issuing '--disable-go' in the installer" + echo >&2 + exit 1 + fi +} + +install_go() { + # When updating this value, ensure correct checksums in packaging/go.d.checksums + GO_PACKAGE_VERSION="v0.7.0" + ARCH_MAP=( + 'i386::386' + 'i686::386' + 'x86_64::amd64' + 'aarch64::arm64' + 'armv64::arm64' + 'armv6l::arm' + 'armv7l::arm' + 'armv5tel::arm' + ) + + if [ -z "${NETDATA_DISABLE_GO+x}" ]; then + echo >&2 "Install go.d.plugin" + ARCH=$(uname -m) + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + + for index in "${ARCH_MAP[@]}" ; do + KEY="${index%%::*}" + VALUE="${index##*::}" + if [ "$KEY" = "$ARCH" ]; then + ARCH="${VALUE}" + break + fi + done + tmp=$(mktemp -d /tmp/netdata-go-XXXXXX) + GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}" + download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}" + download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz" + + if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then + echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer" + echo >&2 + return 1 + fi + + grep "${GO_PACKAGE_BASENAME}\$" "packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null + grep "config.tar.gz" "packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null + + # Checksum validation + if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then + + echo >&2 "go.d plugin checksum validation failure." + echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer" + echo >&2 + + echo "go.d.plugin package files checksum validation failed."
+ exit 1 + fi + + # Install files + tar -xf "${tmp}/config.tar.gz" -C "${LIB_DIR}/conf.d/" + mv "${tmp}/$GO_PACKAGE_BASENAME" "${LIBEXEC_DIR}/plugins.d/go.d.plugin" + fi + return 0 +} + +install_go diff --git a/contrib/debian/netdata.postinst.in b/contrib/debian/netdata.postinst.in index 29615f54..44b53ccf 100644 --- a/contrib/debian/netdata.postinst.in +++ b/contrib/debian/netdata.postinst.in @@ -28,9 +28,10 @@ case "$1" in fi dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry - chown -R root:netdata /usr/share/netdata/* - chown -R root:netdata /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d - setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d/apps.plugin + chown -R root:netdata /usr/share/netdata + chown -R root:netdata /usr/libexec/netdata/plugins.d + chown -R root:netdata /var/lib/netdata/www + setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/libexec/netdata/plugins.d/apps.plugin #PERMS# ;; diff --git a/contrib/debian/rules b/contrib/debian/rules index c1932396..88a8ab36 100755 --- a/contrib/debian/rules +++ b/contrib/debian/rules @@ -17,7 +17,9 @@ TOP = $(CURDIR)/debian/netdata #dh $@ --with autoreconf override_dh_auto_configure: - dh_auto_configure -- --with-math --with-webdir=/var/lib/netdata/www + autoreconf -ivf + dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var \ + --libexecdir=/usr/libexec --with-user=netdata --with-math --with-webdir=/var/lib/netdata/www debian/%.postinst: debian/%.postinst.in sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' $< > $@ @@ -34,7 +36,7 @@ override_dh_install: debian/netdata.postinst mkdir -p "$(TOP)/usr/share/netdata" for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type d -printf '%f '); do \ echo Relocating $$D; \ - mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/$$D"; \ + mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/www/$$D"; \ ln -s "/usr/share/netdata/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \ done @@ -51,6 +53,10 @@ override_dh_install: debian/netdata.postinst done sed -i "/^#PERMS#/d" $(CURDIR)/debian/netdata.postinst + # Install go + # + debian/install_go.sh $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/ $(TOP)/usr/libexec/netdata + override_dh_installdocs: dh_installdocs @@ -58,10 +64,11 @@ override_dh_installdocs: -name README.md \ -not -path './.travis/*' \ -not -path './debian/*' \ + -not -path './contrib/*' \ -exec cp \ - --parents \ - --target $(TOP)/usr/share/doc/netdata/ \ - {} \; + --parents \ + --target $(TOP)/usr/share/doc/netdata/ \ + {} \; override_dh_fixperms: dh_fixperms @@ -69,7 +76,10 @@ override_dh_fixperms: # apps.plugin should only be runnable by the netdata user. It will be # given extra capabilities in the postinst script. 
# - chmod 0754 $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/apps.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/apps.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/freeipmi.plugin + chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/perf.plugin + chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/go.d.plugin override_dh_installlogrotate: cp system/netdata.logrotate debian/netdata.logrotate diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in index 7d73f6d6..f16c85a4 100755 --- a/daemon/anonymous-statistics.sh.in +++ b/daemon/anonymous-statistics.sh.in @@ -25,24 +25,6 @@ fi # Shorten version for easier reporting NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v') -echo "&av=${NETDATA_VERSION}\ -&ec=${ACTION}\ -&ea=${ACTION_RESULT}\ -&el=${ACTION_DATA}\ -&cd1=${NETDATA_SYSTEM_OS_NAME}\ -&cd2=${NETDATA_SYSTEM_OS_ID}\ -&cd3=${NETDATA_SYSTEM_OS_ID_LIKE}\ -&cd4=${NETDATA_SYSTEM_OS_VERSION}\ -&cd5=${NETDATA_SYSTEM_OS_VERSION_ID}\ -&cd6=${NETDATA_SYSTEM_OS_DETECTION}\ -&cd7=${NETDATA_SYSTEM_KERNEL_NAME}\ -&cd8=${NETDATA_SYSTEM_KERNEL_VERSION}\ -&cd9=${NETDATA_SYSTEM_ARCHITECTURE}\ -&cd10=${NETDATA_SYSTEM_VIRTUALIZATION}\ -&cd11=${NETDATA_SYSTEM_VIRT_DETECTION}\ -&cd12=${NETDATA_SYSTEM_CONTAINER}\ -&cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}" >> /tmp/as.log - # ------------------------------------------------------------------------------------------------- # send the anonymous statistics to GA # https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters diff --git a/daemon/common.h b/daemon/common.h index a15ddb87..dfbd6cfe 100644 --- a/daemon/common.h +++ b/daemon/common.h @@ -78,6 +78,7 @@ extern char *netdata_configured_varlib_dir; extern char *netdata_configured_home_dir; extern char *netdata_configured_host_prefix; extern char *netdata_configured_timezone; +extern int netdata_zero_metrics_enabled; extern int netdata_anonymous_statistics_enabled; int netdata_ready; diff --git a/daemon/config/README.md b/daemon/config/README.md index 4778cad2..c36a5b6d 100644 --- a/daemon/config/README.md +++ b/daemon/config/README.md @@ -74,6 +74,7 @@ gap when lost iterations above | `1` | cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data. delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. +enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. ### [web] section options @@ -128,7 +129,7 @@ The configuration options for plugins appear in sections following the pattern ` Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information. -Please note, that by default Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. 
+Please note that, by default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be added to the dashboard automatically (a refresh of the dashboard is needed for them to appear, though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins. #### External plugins diff --git a/daemon/main.c b/daemon/main.c index 0ced9081..bd0970fd 100644 --- a/daemon/main.c +++ b/daemon/main.c @@ -2,6 +2,7 @@ #include "common.h" +int netdata_zero_metrics_enabled; int netdata_anonymous_statistics_enabled; struct config netdata_config = { @@ -1214,6 +1215,8 @@ int main(int argc, char **argv) { web_server_config_options(); + netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_BOOLEAN_NO); + for (i = 0; static_threads[i].name != NULL ; i++) { struct netdata_static_thread *st = &static_threads[i]; @@ -1227,7 +1230,7 @@ int main(int argc, char **argv) { info("netdata initialization completed. Enjoy real-time performance monitoring!"); netdata_ready = 1; - + send_statistics("START", "-", "-"); // ------------------------------------------------------------------------ diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c index 30eaa0ec..d6e4f317 100644 --- a/database/engine/journalfile.c +++ b/database/engine/journalfile.c @@ -3,13 +3,18 @@ static void flush_transaction_buffer_cb(uv_fs_t* req) { - struct generic_io_descriptor *io_descr; + struct generic_io_descriptor *io_descr = req->data; + struct rrdengine_worker_config* wc = req->loop->data; + struct rrdengine_instance *ctx = wc->ctx; debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__); if (req->result < 0) { - fatal("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result)); + ++ctx->stats.io_errors; + rrd_stat_atomic_add(&global_io_errors, 1); + error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result)); + } else { + debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__); } - io_descr = req->data; uv_fs_req_cleanup(req); free(io_descr->buf); @@ -348,6 +353,7 @@ static unsigned replay_transaction(struct rrdengine_instance *ctx, struct rrdeng ret = crc32cmp(jf_trailer->checksum, crc); debug(D_RRDENGINE, "Transaction %"PRIu64" was read from disk. CRC32 check: %s", *id, ret ? "FAILED" : "SUCCEEDED"); if (unlikely(ret)) { + error("Transaction %"PRIu64" was read from disk. 
CRC32 check: FAILED", *id); return size_bytes; } switch (jf_header->type) { diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c index 0f2dceaa..221216bb 100644 --- a/database/engine/rrdengine.c +++ b/database/engine/rrdengine.c @@ -37,24 +37,29 @@ void read_extent_cb(uv_fs_t* req) unsigned i, j, count; void *page, *uncompressed_buf = NULL; uint32_t payload_length, payload_offset, page_offset, uncompressed_payload_length; + uint8_t have_read_error = 0; /* persistent structures */ struct rrdeng_df_extent_header *header; struct rrdeng_df_extent_trailer *trailer; uLong crc; xt_io_descr = req->data; - if (req->result < 0) { - error("%s: uv_fs_read: %s", __func__, uv_strerror((int)req->result)); - goto cleanup; - } - header = xt_io_descr->buf; payload_length = header->payload_length; count = header->number_of_pages; - payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count; - trailer = xt_io_descr->buf + xt_io_descr->bytes - sizeof(*trailer); + + if (req->result < 0) { + struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile; + + ++ctx->stats.io_errors; + rrd_stat_atomic_add(&global_io_errors, 1); + have_read_error = 1; + error("%s: uv_fs_read - %s - extent at offset %"PRIu64"(%u) in datafile %u-%u.", __func__, + uv_strerror((int)req->result), xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno); + goto after_crc_check; + } crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, xt_io_descr->buf, xt_io_descr->bytes - sizeof(*trailer)); ret = crc32cmp(trailer->checksum, crc); @@ -66,12 +71,17 @@ void read_extent_cb(uv_fs_t* req) } #endif if (unlikely(ret)) { - /* TODO: handle errors */ - exit(UV_EIO); - goto cleanup; + struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile; + + ++ctx->stats.io_errors; + rrd_stat_atomic_add(&global_io_errors, 1); + have_read_error = 1; + error("%s: Extent at offset %"PRIu64"(%u) was read from datafile %u-%u. 
CRC32 check: FAILED", __func__, + xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno); } - if (RRD_NO_COMPRESSION != header->compression_algorithm) { +after_crc_check: + if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) { uncompressed_payload_length = 0; for (i = 0 ; i < count ; ++i) { uncompressed_payload_length += header->descr[i].page_length; @@ -99,7 +109,10 @@ void read_extent_cb(uv_fs_t* req) page_offset += header->descr[j].page_length; } /* care, we don't hold the descriptor mutex */ - if (RRD_NO_COMPRESSION == header->compression_algorithm) { + if (have_read_error) { + /* Applications should make sure NULL values match 0 as does SN_EMPTY_SLOT */ + memset(page, 0, descr->page_length); + } else if (RRD_NO_COMPRESSION == header->compression_algorithm) { (void) memcpy(page, xt_io_descr->buf + payload_offset + page_offset, descr->page_length); } else { (void) memcpy(page, uncompressed_buf + page_offset, descr->page_length); @@ -118,12 +131,11 @@ void read_extent_cb(uv_fs_t* req) } rrdeng_page_descr_mutex_unlock(ctx, descr); } - if (RRD_NO_COMPRESSION != header->compression_algorithm) { + if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) { freez(uncompressed_buf); } if (xt_io_descr->completion) complete(xt_io_descr->completion); -cleanup: uv_fs_req_cleanup(req); free(xt_io_descr->buf); freez(xt_io_descr); @@ -246,8 +258,9 @@ void flush_pages_cb(uv_fs_t* req) xt_io_descr = req->data; if (req->result < 0) { + ++ctx->stats.io_errors; + rrd_stat_atomic_add(&global_io_errors, 1); error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result)); - goto cleanup; } #ifdef NETDATA_INTERNAL_CHECKS { @@ -279,7 +292,6 @@ void flush_pages_cb(uv_fs_t* req) } if (xt_io_descr->completion) complete(xt_io_descr->completion); -cleanup: uv_fs_req_cleanup(req); free(xt_io_descr->buf); freez(xt_io_descr); diff --git a/database/rrddim.c b/database/rrddim.c index 088c80d0..09f364b0 100644 --- a/database/rrddim.c +++ b/database/rrddim.c @@ -466,7 +466,7 @@ inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) { rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS); } -inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) { +inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) { debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", st->name, rd->name); rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE); @@ -475,7 +475,7 @@ inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) { // ---------------------------------------------------------------------------- // RRDDIM - collect values for a dimension -inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value) { +inline collected_number rrddim_set_by_pointer(RRDSET *st __maybe_unused, RRDDIM *rd, collected_number value) { debug(D_RRD_CALLS, "rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value); now_realtime_timeval(&rd->last_collected_time); diff --git a/database/rrdvar.c b/database/rrdvar.c index 600bd34c..95ab6859 100644 --- a/database/rrdvar.c +++ b/database/rrdvar.c @@ -68,7 +68,8 @@ inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) { freez(rv); } -inline RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) { +inline RRDVAR *rrdvar_create_and_index(const char *scope __maybe_unused, avl_tree_lock *tree, const char *name, + RRDVAR_TYPE type, 
diff --git a/database/rrddim.c b/database/rrddim.c
index 088c80d0..09f364b0 100644
--- a/database/rrddim.c
+++ b/database/rrddim.c
@@ -466,7 +466,7 @@ inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) {
     rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
 }
 
-inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) {
+inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) {
     debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", st->name, rd->name);
 
     rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
@@ -475,7 +475,7 @@ inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) {
 // ----------------------------------------------------------------------------
 // RRDDIM - collect values for a dimension
 
-inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value) {
+inline collected_number rrddim_set_by_pointer(RRDSET *st __maybe_unused, RRDDIM *rd, collected_number value) {
     debug(D_RRD_CALLS, "rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value);
 
     now_realtime_timeval(&rd->last_collected_time);
diff --git a/database/rrdvar.c b/database/rrdvar.c
index 600bd34c..95ab6859 100644
--- a/database/rrdvar.c
+++ b/database/rrdvar.c
@@ -68,7 +68,8 @@ inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) {
     freez(rv);
 }
 
-inline RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) {
+inline RRDVAR *rrdvar_create_and_index(const char *scope __maybe_unused, avl_tree_lock *tree, const char *name,
+                                       RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) {
     char *variable = strdupz(name);
     rrdvar_fix_name(variable);
     uint32_t hash = simple_hash(variable);
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
index 7f800854..4d9c3ba3 100644
--- a/diagrams/netdata-overview.xml
+++ b/diagrams/netdata-overview.xml
@@ -1 +1 @@
-<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36" version="9.3.1" editor="www.draw.io" type="github"><diagram id="319b92f5-ce0c-3638-1d72-e7e1d323a6f9" name="Page-1">[base64-encoded draw.io diagram data omitted]</diagram></mxfile>
\ No newline at end of file
+<mxfile modified="2019-07-10T07:30:20.643Z" host="www.draw.io" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36" version="10.9.2" etag="JRR-u-yNVbUJwUu5Enbm" type="device"><diagram id="319b92f5-ce0c-3638-1d72-e7e1d323a6f9" name="Page-1">[base64-encoded draw.io diagram data omitted]</diagram></mxfile>
\ No newline at end of file
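The `__maybe_unused` annotations in the rrddim.c and rrdvar.c hunks above exist because those parameters are consumed only by `debug()` output, which compiles away in non-debug builds and would otherwise trigger `-Wunused-parameter`. A sketch of the pattern; the macro definition shown is an assumption for illustration (netdata keeps its own definition in libnetdata):

```c
#include <stdio.h>

/* Hypothetical stand-in for the libnetdata macro. */
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif

static inline int rrddim_touch(int chart_id __maybe_unused, int dim_id)
{
#ifdef NETDATA_INTERNAL_CHECKS
    /* chart_id is referenced only by this debug output; when the block
     * compiles away, the annotation keeps the build warning-free. */
    fprintf(stderr, "touch chart %d dim %d\n", chart_id, dim_id);
#endif
    return dim_id + 1; /* dim_id is used on every path */
}
```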
\ No newline at end of file
diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md
index 0d478d73..5a2ae534 100644
--- a/docs/Demo-Sites.md
+++ b/docs/Demo-Sites.md
@@ -7,7 +7,6 @@ Location | Netdata demo URL | 60 mins reqs | VM Donated by
 London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
 Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/)
 Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com)
-Roubaix (France)|**[ventureer.my-netdata.io](https://ventureer.my-netdata.io)**|[![Requests Per Second](https://ventureer.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://ventureer.my-netdata.io)|[Ventureer.com](https://ventureer.com/)
 Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/)
 Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
 Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..8dd0c7a6
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,7 @@
+# Read documentation on https://docs.netdata.cloud
+
+Welcome to the Netdata documentation! While you can read Netdata documentation here, or throughout the Netdata repository, our intention is that these pages are read on [docs.netdata.cloud](https://docs.netdata.cloud).
+
+Links between documentation pages will work fine here, but the formatting may not be perfect, as our documentation site uses a few extra Markdown features that GitHub doesn't support natively. Other things might be missing or look less than perfect.
+
+Now get out there and build an exceptional infrastructure.
\ No newline at end of file
diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md
index a71897f4..c4def5f6 100644
--- a/docs/Running-behind-apache.md
+++ b/docs/Running-behind-apache.md
@@ -3,7 +3,7 @@
 Below you can find instructions for configuring an apache server to:
 
 1. proxy a single Netdata via an HTTP and HTTPS virtual host
-2. dynamically proxy any number of Netdata
+2. dynamically proxy any number of Netdata servers
 3. add user authentication
 4. adjust Netdata settings to get optimal results
 
@@ -145,13 +145,15 @@ sudo a2ensite netdata.conf && service apache2 reload
 ## Netdata proxy in Plesk
 
 _Assuming the main goal is to make Netdata running in HTTPS._
+
 1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate
 2. Go to "Apache & nginx Settings", and in the following section, add:
+
 ```
 RewriteEngine on
 RewriteRule (.*) http://localhost:19999/$1 [P,L]
 ```
-3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works.
+3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP; it just works. Repeat the operation for as many servers as you need.
@@ -164,6 +166,7 @@ Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get instal
 
 Then, generate password for user `netdata`, using `htpasswd -c /etc/apache2/.htpasswd netdata`
 
+**Apache 2.2 Example:**
 Modify the virtual host with these:
 
 ```
@@ -186,6 +189,34 @@ Modify the virtual host with these:
 
 Specify `Location /` if Netdata is running on dedicated virtual host.
 
+
+**Apache 2.4 (dedicated virtual host) Example:**
+
+```
+<VirtualHost *:80>
+    RewriteEngine On
+    ProxyRequests Off
+    ProxyPreserveHost On
+
+    ServerName netdata.domain.tld
+
+    <Proxy *>
+        AllowOverride None
+        AuthType Basic
+        AuthName "Protected site"
+        AuthUserFile /etc/apache2/.htpasswd
+        Require valid-user
+    </Proxy>
+
+    ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+    ProxyPassReverse "/" "http://localhost:19999/"
+
+    ErrorLog ${APACHE_LOG_DIR}/netdata-error.log
+    CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined
+</VirtualHost>
+```
+
 Note: Changes are applied by reloading or restarting Apache.
 
 # Netdata configuration
@@ -230,6 +261,14 @@ You can also use a unix domain socket. This will also provide a faster route bet
 [web]
     bind to = unix:/tmp/netdata.sock
 ```
+
+Apache 2.4.24+ cannot read from `/tmp`, so create your socket in `/var/run/netdata`:
+
+```
+[web]
+    bind to = unix:/var/run/netdata/netdata.sock
+```
+
 _note: Netdata v1.8+ support unix domain sockets_
 
 At the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netdata.sock|`, like this:
@@ -265,6 +304,6 @@ apache logs accesses and Netdata logs them too. You can prevent Netdata from gen
 Make sure the requests reach Netdata, by examining `/var/log/netdata/access.log`.
 
 1. if the requests do not reach Netdata, your apache does not forward them.
-2. if the requests reach Netdata by the URLs are wrong, you have not re-written them properly.
+2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly.
 
 [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/Running-behind-haproxy.md b/docs/Running-behind-haproxy.md
new file mode 100644
index 00000000..2c1835f5
--- /dev/null
+++ b/docs/Running-behind-haproxy.md
@@ -0,0 +1,280 @@
+# Netdata via HAProxy
+
+> HAProxy is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic web sites and powers quite a number of the world's most visited ones.
+
+If Netdata is running on a host running HAProxy, rather than connecting to Netdata from a port number, a domain name can be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. This can make it possible to connect to Netdata at https://example.com or https://example.com/netdata/, which is a much nicer experience than http://example.com:19999.
+
+To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata, the following configuration can be used:
+
+## Default Configuration
+
+For all examples, set the mode to `http`:
+
+```
+defaults
+    mode http
+```
+
+## Simple Configuration
+
+A simple example where the base URL, say http://example.com, is used with no subpath:
+
+### Frontend
+
+Create a frontend to receive the request.
+
+```
+frontend http_frontend
+    ## HTTP ipv4 and ipv6 on all ips ##
+    bind :::80 v4v6
+
+    default_backend netdata_backend
+```
+
+### Backend
+
+Create the Netdata backend which will send requests to port `19999`.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+## Configuration with subpath
+
+An example where the base URL is used with a subpath `/netdata/`:
+
+### Frontend
+
+To use a subpath, create an ACL, which will set a variable based on the subpath.
+
+```
+frontend http_frontend
+    ## HTTP ipv4 and ipv6 on all ips ##
+    bind :::80 v4v6
+
+    # URL begins with /netdata
+    acl is_netdata url_beg /netdata
+
+    # if trailing slash is missing, redirect to /netdata/
+    http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ }
+
+    ## Backends ##
+    use_backend netdata_backend if is_netdata
+
+    # Other requests go here (optional)
+    # put netdata_backend here if no others are used
+    default_backend www_backend
+```
+
+### Backend
+
+Same as the simple example, except remove `/netdata/` with a regex.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request set-path %[path,regsub(^/netdata/,/)]
+
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+## Using TLS communication
+
+TLS can be used by adding port `443` and a cert to the frontend. This example will only use Netdata if the host matches example.com (replace with your domain).
+
+### Frontend
+
+This frontend uses a certificate list.
+
+```
+frontend https_frontend
+    ## HTTP ##
+    bind :::80 v4v6
+    # Redirect all HTTP traffic to HTTPS with 301 redirect
+    redirect scheme https code 301 if !{ ssl_fc }
+
+    ## HTTPS ##
+    # Bind to all v4/v6 addresses, use a list of certs in file
+    bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt
+
+    ## ACL ##
+    # Optionally check host for Netdata
+    acl is_example_host hdr_sub(host) -i example.com
+
+    ## Backends ##
+    use_backend netdata_backend if is_example_host
+    # Other requests go here (optional)
+    default_backend www_backend
+```
+
+In the cert list file place a mapping from a certificate file to the domain used:
+
+`/etc/letsencrypt/certslist.txt`:
+
+```
+example.com /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+The file `/etc/letsencrypt/live/example.com/example.com.pem` should contain the key and certificate (in that order) concatenated into a `.pem` file:
+
+```
+$ cat /etc/letsencrypt/live/example.com/fullchain.pem \
+    /etc/letsencrypt/live/example.com/privkey.pem > \
+    /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+### Backend
+
+Same as the simple example, except set the protocol to `https`.
+
+```
+backend netdata_backend
+    option forwardfor
+    server netdata_local 127.0.0.1:19999
+
+    http-request add-header X-Forwarded-Proto https
+    http-request set-header Host %[src]
+    http-request set-header X-Forwarded-For %[src]
+    http-request set-header X-Forwarded-Port %[dst_port]
+    http-request set-header Connection "keep-alive"
+```
+
+## Enable authentication
+
+To use basic HTTP Authentication, create an authentication list:
+
+```
+# HTTP Auth
+userlist basic-auth-list
+  group is-admin
+  # Plaintext password
+  user admin password passwordhere groups is-admin
+```
+
+You can create a hashed password using the `mkpasswd` utility.
+
+```
+$ printf "passwordhere" | mkpasswd --stdin --method=sha-256
+$5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1
+```
+
+Replace `passwordhere` with the hash:
+
+```
+user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin
+```
+
+Now add at the top of the backend:
+
+```
+acl devops-auth http_auth_group(basic-auth-list) is-admin
+http-request auth realm netdata_local unless devops-auth
+```
+
+## Full Example
+
+Full example configuration with HTTP auth over TLS with subpath:
+
+```
+global
+    maxconn 20000
+
+    log /dev/log local0
+    log /dev/log local1 notice
+    user haproxy
+    group haproxy
+    pidfile /run/haproxy.pid
+
+    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+    stats timeout 30s
+    daemon
+
+    tune.ssl.default-dh-param 4096  # Max size of DHE key
+
+    # Default ciphers to use on SSL-enabled listening sockets.
+ ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS + ssl-default-bind-options no-sslv3 + +defaults + log global + mode http + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http + +frontend https_frontend + ## HTTP ## + bind :::80 v4v6 + # Redirect all HTTP traffic to HTTPS with 301 redirect + redirect scheme https code 301 if !{ ssl_fc } + + ## HTTPS ## + # Bind to all v4/v6 addresses, use a list of certs in file + bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt + + ## ACL ## + # Optionally check host for Netdata + acl is_example_host hdr_sub(host) -i example.com + acl is_netdata url_beg /netdata + + http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ } + + ## Backends ## + use_backend netdata_backend if is_example_host is_netdata + default_backend www_backend + +# HTTP Auth +userlist basic-auth-list + group is-admin + # Hashed password + user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin + +## Default server(s) (optional)## +backend www_backend + mode http + balance roundrobin + timeout connect 5s + timeout server 30s + timeout queue 30s + + http-request add-header 'X-Forwarded-Proto: https' + server other_server 111.111.111.111:80 check + +backend netdata_backend + acl devops-auth http_auth_group(basic-auth-list) is-admin + http-request auth realm netdata_local unless devops-auth + + option forwardfor + server netdata_local 127.0.0.1:19999 + + http-request set-path %[path,regsub(^/netdata/,/)] + + http-request add-header X-Forwarded-Proto https + http-request set-header Host %[src] + http-request set-header X-Forwarded-For %[src] + http-request set-header X-Forwarded-Port %[dst_port] + http-request set-header Connection "keep-alive" +``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-haproxy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md index b38d27fa..81ebc1a7 100644 --- a/docs/Running-behind-nginx.md +++ b/docs/Running-behind-nginx.md @@ -1,9 +1,43 @@ -# Netdata via nginx +# Running Netdata behind Nginx -To pass Netdata via a nginx, use this: +## Intro + +[Nginx](https://nginx.org/en/) is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server used to host websites and applications of all sizes. + +The software is known for its low impact on memory resources, high scalability, and its modular, event-driven architecture which can offer secure, predictable performance. + +## Why Nginx + +- By default, Nginx is fast and lightweight out of the box. + +- Nginx is used and useful in cases when you want to access different instances of Netdata from a single server. + +- Password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism. 
+
+- Until v1.16.0, which introduced TLS (HTTPS) support, a proxy was also necessary to encrypt the communication to Netdata.
+
+## Nginx configuration file
+
+All Nginx configurations can be found in the `/etc/nginx/` directory. The main configuration file is `/etc/nginx/nginx.conf`. Website or app-specific configurations can be found in the `/etc/nginx/sites-available/` directory.
+
+Configuration options in Nginx are known as directives. Directives are organized into groups known as blocks or contexts. The two terms can be used interchangeably.
+
+Depending on your installation source, you’ll find an example configuration file at `/etc/nginx/conf.d/default.conf` or `/etc/nginx/sites-enabled/default`; in some cases you may have to manually create the `sites-available` and `sites-enabled` directories.
+
+You can edit the Nginx configuration file with Nano, Vim or any other text editor you are comfortable with.
+
+After making changes to the configuration files:
+
+- Test the Nginx configuration with `nginx -t`.
+
+- Restart Nginx to apply the changes, with `/etc/init.d/nginx restart` or `service nginx restart`.
+
+## Ways to access Netdata via Nginx
 
 ### As a virtual host
 
+With this method, instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as `netdata.example.com`, as used in the configuration below.
+
 ```
 upstream backend {
     # the Netdata server
@@ -30,9 +64,11 @@ server {
     }
 }
 ```
-
 ### As a subfolder to an existing virtual host
 
+This method is recommended when Netdata is to be served from a subfolder (or directory).
+In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`.
+
 ```
 upstream netdata {
     server 127.0.0.1:19999;
@@ -69,7 +105,9 @@ server {
 }
 ```
 
-### As a subfolder for multiple Netdata servers, via one nginx
+### As a subfolder for multiple Netdata servers, via one Nginx
+
+This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via subfolders.
 
 ```
 upstream backend-server1 {
@@ -114,34 +152,33 @@ Of course you can add as many backend servers as you like.
 
 Using the above, you access Netdata on the backend servers, like this:
 
-- `http://nginx.server/netdata/server1/` to reach `backend-server1`
-- `http://nginx.server/netdata/server2/` to reach `backend-server2`
-
-### Using TLS communication
+- `http://netdata.example.com/netdata/server1/` to reach `backend-server1`
+- `http://netdata.example.com/netdata/server2/` to reach `backend-server2`
 
-In case the Netdata web server has been [configured to use TLS](../web/server/#enabling-tls-support),
-you must also encrypt the communication between Nginx and Netdata.
+### Encrypt the communication between Nginx and Netdata
 
-To enable encryption, first [enable SSL on nginx](http://nginx.org/en/docs/http/configuring_https_servers.html) and then put the following in the location section of the Nginx configuration:
+In case Netdata's web server has been [configured to use TLS](../web/server/#enabling-tls-support), it is necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, add the following parameters to your `nginx.conf`:
 
 ```
 proxy_set_header X-Forwarded-Proto https;
 proxy_pass https://localhost:19999;
 ```
 
-If nginx is not configured as described here, you will probably receive the error `SSL_ERROR_RX_RECORD_TOO_LONG`.
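+As a minimal sketch (the `/netdata/` subfolder and the `localhost` upstream are placeholders taken from the examples above), these two directives sit inside the `location` block that proxies to Netdata:
+
+```
+location /netdata/ {
+    proxy_set_header X-Forwarded-Proto https;
+    proxy_pass https://localhost:19999/;
+}
+```
+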
+Optionally, it is also possible to [enable TLS/SSL on Nginx](http://nginx.org/en/docs/http/configuring_https_servers.html); this way, the communication is encrypted not only between Nginx and Netdata, but also between the user and Nginx.
+
+If Nginx is not configured as described here, you will probably receive the error `SSL_ERROR_RX_RECORD_TOO_LONG`.
 
 ### Enable authentication
 
-Create an authentication file to enable the nginx basic authentication.
-Do not use authentication without SSL/TLS!
-If you haven't one you can do the following:
+Create an authentication file to enable basic authentication via Nginx; this secures your Netdata dashboard.
+
+If you don't have an authentication file, you can use the following command:
 
 ```
 printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords
 ```
 
-And enable the authentication inside your server directive:
+And then enable the authentication inside your server directive:
 
 ```
 server {
@@ -152,9 +189,9 @@ server {
 }
 ```
 
-## limit direct access to Netdata
+## Limit direct access to Netdata
 
-If your nginx is on `localhost`, you can use this to protect your Netdata:
+If your Nginx is on `localhost`, you can use this to protect your Netdata:
 
 ```
 [web]
@@ -163,7 +200,7 @@ If your nginx is on `localhost`, you can use this to protect your Netdata:
 
 ---
 
-You can also use a unix domain socket. This will also provide a faster route between nginx and Netdata:
+You can also use a unix domain socket. This will also provide a faster route between Nginx and Netdata:
 
 ```
 [web]
@@ -171,7 +208,7 @@ You can also use a unix domain socket. This will also provide a faster route bet
 ```
 _note: Netdata v1.8+ support unix domain sockets_
 
-At the nginx side, use something like this to use the same unix domain socket:
+At the Nginx side, use something like this to use the same unix domain socket:
 
 ```
 upstream backend {
@@ -182,7 +219,7 @@ upstream backend {
 
 ---
 
-If your nginx server is not on localhost, you can set:
+If your Nginx server is not on localhost, you can set:
 
 ```
 [web]
@@ -194,9 +231,9 @@ _note: Netdata v1.9+ support `allow connections from`_
 
 `allow connections from` accepts [Netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.
 
-## prevent the double access.log
+## Prevent the double access.log
 
-nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:
+Nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:
 
 ```
 [global]
@@ -205,7 +242,7 @@ nginx logs accesses and Netdata logs them too. You can prevent Netdata from gene
 
 ## SELinux
 
-If you get an 502 Bad Gateway error you might check your nginx error log:
+If you get a 502 Bad Gateway error, you might check your Nginx error log:
 
 ```sh
 # cat /var/log/nginx/error.log:
@@ -215,4 +252,4 @@ If you get an 502 Bad Gateway error you might check your nginx error log:
 
 If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`.
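+
+As a quick check (a sketch, assuming the standard SELinux `getsebool` utility is installed), you can confirm whether this boolean is the cause before changing it:
+
+```sh
+# 'off' here means Nginx is not allowed to open outbound network connections
+getsebool httpd_can_network_connect
+```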
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
\ No newline at end of file diff --git a/docs/configuration-guide.md b/docs/configuration-guide.md index 2a9539dc..1c79e027 100644 --- a/docs/configuration-guide.md +++ b/docs/configuration-guide.md @@ -59,7 +59,7 @@ Entire plugins can be turned off from the [netdata.conf [plugins]](../daemon/con ##### Show charts with zero metrics -By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. +By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. ### Modify alarms and notifications diff --git a/docs/generator/buildhtml.sh b/docs/generator/buildhtml.sh index e1c108fb..dbd30391 100755 --- a/docs/generator/buildhtml.sh +++ b/docs/generator/buildhtml.sh @@ -27,10 +27,9 @@ find . -type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o # Copy Netdata html resources cp -a ./${GENERATOR_DIR}/custom ./${SRC_DIR}/ - # Modify the first line of the main README.md, to enable proper static html generation echo "Modifying README header" -sed -i -e '0,/# Netdata /s//# Introduction\n\n/' ${SRC_DIR}/README.md +sed -i -e '0,/# Netdata /s//# Netdata Documentation\n\n/' ${SRC_DIR}/README.md # Remove all GA tracking code find ${SRC_DIR} -name "*.md" -print0 | xargs -0 sed -i -e 's/\[!\[analytics.*UA-64295674-3)\]()//g' @@ -39,7 +38,6 @@ find ${SRC_DIR} -name "*.md" -print0 | xargs -0 sed -i -e 's/\[!\[analytics.*UA- declare -a EXCLUDE_LIST=( "HISTORICAL_CHANGELOG.md" "contrib/sles11/README.md" - "packaging/maintainers/README.md" ) for f in "${EXCLUDE_LIST[@]}"; do @@ -56,13 +54,12 @@ MKDOCS_CONFIG_FILE="${GENERATOR_DIR}/mkdocs.yml" MKDOCS_DIR="doc" DOCS_DIR=${GENERATOR_DIR}/${MKDOCS_DIR} rm -rf ${DOCS_DIR} -mkdir ${DOCS_DIR} prep_html() { lang="${1}" echo "Creating ${lang} mkdocs.yaml" - if [ "${lang}" = "en" ] ; then + if [ "${lang}" == "en" ] ; then SITE_DIR="build" else SITE_DIR="build/${lang}" @@ -86,16 +83,21 @@ prep_html() { if [ "${lang}" != "en" ] ; then find "${GENERATOR_DIR}/${SITE_DIR}" -name "*.html" -print0 | xargs -0 sed -i -e 's/https:\/\/github.com\/netdata\/netdata\/blob\/master\/\S*md/https:\/\/github.com\/netdata\/localization\//g' fi + + # Replace index.html with DOCUMENTATION/index.html. 
Since we're moving it up one directory, we need to remove ../ from the links + echo "Replacing index.html with DOCUMENTATION/index.html" + sed 's/\.\.\///g' ${GENERATOR_DIR}/${SITE_DIR}/DOCUMENTATION/index.html > ${GENERATOR_DIR}/${SITE_DIR}/index.html + } for d in "en" $(find ${LOC_DIR} -mindepth 1 -maxdepth 1 -name .git -prune -o -type d -printf '%f ') ; do echo "Preparing source for $d" - cp -a ${SRC_DIR}/* ${DOCS_DIR}/ + cp -r ${SRC_DIR} ${DOCS_DIR} if [ "${d}" != "en" ] ; then cp -a ${LOC_DIR}/${d}/* ${DOCS_DIR}/ fi prep_html $d - rm -rf ${DOCS_DIR}/* + rm -rf ${DOCS_DIR} done # Remove cloned projects and temp directories diff --git a/docs/generator/buildyaml.sh b/docs/generator/buildyaml.sh index e4a5466a..f887c695 100755 --- a/docs/generator/buildyaml.sh +++ b/docs/generator/buildyaml.sh @@ -48,11 +48,12 @@ navpart() { } echo -e 'site_name: Netdata Documentation +site_url: https://docs.netdata.cloud repo_url: https://github.com/netdata/netdata repo_name: GitHub edit_uri: blob/master site_description: Netdata Documentation -copyright: Netdata, 2018 +copyright: Netdata, 2019 docs_dir: '${docs_dir}' site_dir: '${site_dir}' #use_directory_urls: false @@ -67,6 +68,9 @@ extra: link: "https://www.facebook.com/linuxnetdata/" theme: name: "material" + palette: + primary: "blue grey" + accent: "light green" custom_dir: custom/themes/material favicon: custom/img/favicon.ico language: '${language}' @@ -85,7 +89,6 @@ markdown_extensions: - footnotes - tables - admonition - - codehilite - meta - sane_lists - smarty @@ -99,6 +102,9 @@ markdown_extensions: - pymdownx.caret - pymdownx.critic - pymdownx.details + - pymdownx.highlight: + pygments_style: manni + noclasses: true - pymdownx.inlinehilite - pymdownx.magiclink - pymdownx.mark @@ -117,9 +123,12 @@ markdown_extensions: - pymdownx.extrarawhtml nav:' -navpart 1 . README "About" +navpart 1 . "README" "" -echo -ne " - 'docs/Demo-Sites.md' +navpart 1 . . 
"About Netdata" + +echo -ne " - 'docs/what-is-netdata.md' + - 'docs/Demo-Sites.md' - 'docs/netdata-security.md' - 'docs/anonymous-statistics.md' - 'docs/Donations-netdata-has-received.md' @@ -138,6 +147,7 @@ echo -ne " - 'docs/Demo-Sites.md' - 'packaging/installer/README.md' - 'packaging/docker/README.md' - 'packaging/installer/UPDATE.md' + - 'packaging/DISTRIBUTIONS.md' - 'packaging/installer/UNINSTALL.md' - 'docs/GettingStarted.md' - Running Netdata: @@ -153,6 +163,7 @@ echo -ne " - Running behind another web server: - 'docs/Running-behind-apache.md' - 'docs/Running-behind-lighttpd.md' - 'docs/Running-behind-caddy.md' + - 'docs/Running-behind-haproxy.md' " #navpart 2 system navpart 2 database @@ -253,9 +264,10 @@ navpart 2 web/api/badges "" "" 2 navpart 2 web/api/health "" "" 2 navpart 2 web/api/queries "" "Queries" 2 -echo -ne "- Hacking Netdata: +echo -ne "- Additional Info: - CODE_OF_CONDUCT.md - CONTRIBUTORS.md + - packaging/maintainers/README.md " navpart 2 packaging/makeself "" "" 4 navpart 2 libnetdata "" "libnetdata" 4 diff --git a/docs/generator/checklinks.sh b/docs/generator/checklinks.sh index acc14465..5012ad17 100755 --- a/docs/generator/checklinks.sh +++ b/docs/generator/checklinks.sh @@ -5,6 +5,8 @@ # Validates and tries to fix all links that will cause issues either in the repo, or in the html site GENERATOR_DIR="docs/generator" +MKDOCS_DIR="doc" +DOCS_DIR=${GENERATOR_DIR}/${MKDOCS_DIR} dbg () { if [ "$VERBOSE" -eq 1 ] ; then printf "%s\\n" "${1}" ; fi @@ -186,25 +188,27 @@ ck_netdata_relative () { fi ;; * ) - if [ -f "$fpath/$rlnk" ] ; then - dbg "-- # (path/someotherfile) $rlnk" - if [ "$fpath" = "." ] ; then - s="https://github.com/netdata/netdata/tree/master/$rlnk" - else - s="https://github.com/netdata/netdata/tree/master/$fpath/$rlnk" + if [ -d "$fpath/$rlnk" ] ; then + dbg "-- # (path) -> htmldoc (path/)" + testf "$f" "$fpath/$rlnk/README.md" + if [ $? -eq 0 ] ; then + s="$rlnk/" + if [ "$fname" != "README.md" ] ; then s="../$s"; fi fi else - if [ -d "$fpath/$rlnk" ] ; then - dbg "-- # (path) -> htmldoc (path/)" - testf "$f" "$fpath/$rlnk/README.md" - if [ $? -eq 0 ] ; then - s="$rlnk/" - if [ "$fname" != "README.md" ] ; then s="../$s"; fi + cd - >/dev/null + if [ -f "$fpath/$rlnk" ] ; then + dbg "-- # (path/someotherfile) $rlnk" + if [ "$fpath" = "." ] ; then + s="https://github.com/netdata/netdata/tree/master/$rlnk" + else + s="https://github.com/netdata/netdata/tree/master/$fpath/$rlnk" fi else echo "-- ERROR: $f - $rlnk is neither a file or a directory. Giving up!" EXITCODE=1 fi + cd $DOCS_DIR >/dev/null fi ;; esac @@ -212,7 +216,7 @@ ck_netdata_relative () { if [[ ! -z $s ]] ; then srch=$(echo "$rlnk" | sed 's/\//\\\//g') rplc=$(echo "$s" | sed 's/\//\\\//g') - fix "sed -i 's/($srch)/($rplc)/g' $GENERATOR_DIR/doc/$f" + fix "sed -i 's/($srch)/($rplc)/g' $f" fi } @@ -314,9 +318,11 @@ if [ -z "${file}" ] ; then printhelp exit 1 fi + cd ${DOCS_DIR} for f in $(find . 
-type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o -name "*.md" -print); do checklinks "$f" done + cd - else if [ $RECURSIVE -eq 1 ] ; then printhelp diff --git a/docs/generator/custom/css/netdata.css b/docs/generator/custom/css/netdata.css index d9003be1..27f1b08c 100644 --- a/docs/generator/custom/css/netdata.css +++ b/docs/generator/custom/css/netdata.css @@ -5,3 +5,71 @@ .md-typeset { font-size: .75rem } + +/* Underline text */ + +.md-typeset a:not(.nav-button):not(.md-icon):not(.headerlink) { + border-bottom: 1px solid #272b30; +} + +/* Custom styling for the new documentation homepage. + In particular, the three buttons for install/getting started/configuration. */ + +.homepage-nav { + display: flex; + margin-top: 1.4rem; +} + +.homepage-nav div { + flex: 1; +} + +.homepage-nav .nav-install { + margin-right: 1rem; +} + +.homepage-nav .nav-configuration { + margin-left: 1rem; +} + +.nav-button { + border: 2px solid black; + border-radius: 4px; + display: block; + font-weight: 700; + margin: 0 auto; + padding: 0.6rem 0; + text-align: center; +} + +/* Hide the label at the top of the navigation menu. Does nothing. + Well, it does do something on mobile, and this media query makes + sure it's hidden only on screens wide enough to not use the mobile sidebar. */ +@media only screen and (min-width:76.25em) { + .md-nav--primary .md-nav__title { + display: none; + } +} + +/* Change the language selector dropdown to match new color. */ +.md-header-nav select#sel { + background-color: rgba(0,0,0,.26) !important; + padding: 3px; + margin-left: 5px; + margin-right: 20px; +} + +/* Add some whitespace to the bottom of each doc. */ +.md-content { + margin-bottom: 6rem; +} + +/* Make sure inline code in tables doesn't break. */ +.md-typeset__table code { + word-break: normal; +} + +/* Bold the first item on the docs sidebar: Netdata Documentation */ +.md-nav--primary > .md-nav__list > .md-nav__item:first-of-type { + font-weight: 700; +}
\ No newline at end of file diff --git a/docs/netdata-security.md b/docs/netdata-security.md index 955abebd..a905717d 100644 --- a/docs/netdata-security.md +++ b/docs/netdata-security.md @@ -89,7 +89,7 @@ In Netdata v1.9+ there is also access list support, like this: #### Use an authenticating web server in proxy mode -Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md#netdata-via-nginx), [Apache](Running-behind-apache.md), [lighthttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy). +Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md), [Apache](Running-behind-apache.md), [lighthttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy). To use this method, you should firewall protect all your Netdata servers, so that only the web server IP will allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager): diff --git a/docs/what-is-netdata.md b/docs/what-is-netdata.md new file mode 100644 index 00000000..6664897d --- /dev/null +++ b/docs/what-is-netdata.md @@ -0,0 +1,385 @@ +# What is Netdata? + +[![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() + +[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python) + +--- + +**Netdata** is **distributed, real-time, performance and health monitoring for systems and applications**. 
It is a highly optimized monitoring agent you install on all your systems and containers.
+
+Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third-party components, or it can be integrated into existing monitoring tool chains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc).
+
+_Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._
+
+Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **macOS**.
+
+---
+
+## How it looks
+
+The following animated image shows the top part of a typical Netdata dashboard.
+
+![peek 2018-11-11 02-40](https://user-images.githubusercontent.com/2662304/48307727-9175c800-e55b-11e8-92d8-a581d60a4889.gif)
+
+*A typical Netdata dashboard, in 1:1 timing. Charts can be panned by dragging them, zoomed in/out with `SHIFT` + `mouse wheel`, and an area can be selected for zoom-in with `SHIFT` + `mouse selection`. Netdata is highly interactive and **real-time**, optimized to get the work done!*
+
+> *We have a few online demos to experience it live: [https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)*
+
+## User base
+
+Netdata is used by hundreds of thousands of users all over the world.
+Check our [GitHub watchers list](https://github.com/netdata/netdata/watchers).
+You will find people working for **Amazon**, **Atos**, **Baidu**, **Cisco Systems**, **Citrix**, **Deutsche Telekom**, **DigitalOcean**,
+**Elastic**, **EPAM Systems**, **Ericsson**, **Google**, **Groupon**, **Hortonworks**, **HP**, **Huawei**,
+**IBM**, **Microsoft**, **NewRelic**, **Nvidia**, **Red Hat**, **SAP**, **Selectel**, **TicketMaster**,
+**Vimeo**, and many more!
+
+### Docker pulls
+We provide docker images for the most common architectures. These are statistics reported by Docker Hub:
+
+[![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) [![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) [![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/)
+
+### Registry
+When you install multiple Netdata agents, they are integrated into **one distributed application**, via a [Netdata registry](../registry/#registry). This is a web browser feature and it allows us to count the number of unique users and unique Netdata servers installed.
The following information comes from the global public Netdata registry we run:
+
+[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+
+*in the last 24 hours:*<br/> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+
+## Why Netdata
+
+Netdata takes a quite different approach to monitoring.
+
+Netdata is a monitoring agent you install on all your systems. It is:
+
+- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
+- a **time-series database** - all stored in memory (does not touch the disks while it runs)
+- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
+- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
+
+All the above are packaged together in a very flexible, extremely modular, distributed application.
+
+This is how Netdata compares to other monitoring solutions:
+
+Netdata|others (open-source and commercial)
+:---:|:---:
+**High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)
+Monitors everything, **thousands of metrics per node**|Monitor just a few metrics
+UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view
+**Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start
+Install and get results **immediately**|Long preparation is required to get any useful results
+Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*
+**Kills the console** for tracing performance issues|The console is always required for troubleshooting
+Requires **zero dedicated resources**|Require large dedicated resources
+
+Netdata is **open-source**, **free**, super **fast**, very **easy**, completely **open**, extremely **efficient**,
+**flexible** and easy to integrate.
+
+It has been designed by **SysAdmins**, **DevOps** and **Developers** to troubleshoot performance problems, not just to visualize metrics.
+
+## How it works
+
+Netdata is a highly efficient, highly modular, metrics management engine. Its lockless design makes it ideal for concurrent operations on the metrics.
+
+![image](https://user-images.githubusercontent.com/2662304/48323827-b4c17580-e636-11e8-842c-0ee72fcb4115.png)
+
+This is how it works:
+
+Function|Description|Documentation
+:---:|:---|:---:
+**Collect**|Multiple independent data collection workers are collecting metrics from their sources using the optimal protocol for each application and push the metrics to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](../collectors/#data-collection-plugins)
+**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](../database/#database)
+**Check**|A lockless independent watchdog is evaluating **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](../health/#health-monitoring)
+**Stream**|A lockless independent worker is streaming metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](../streaming/#streaming-and-replication)
+**Archive**|A lockless independent worker is down-sampling the metrics and pushes them to **backend** time-series databases.|[`backends`](../backends/)
+**Query**|Multiple independent workers are attached to the [internal web server](../web/server/#web-server), servicing API requests, including [data queries](../web/api/queries/#database-queries).|[`web/api`](../web/api/#api)
+
+The result is a highly efficient, low latency system, supporting multiple readers and one writer on each metric.
+
+## Infographic
+
+This is a high level overview of the Netdata feature set and architecture.
+Click it to interact with it (it has direct links to documentation).
+
+[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html)
+
+
+## Features
+
+![finger-video](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif)
+
+This is what you should expect from Netdata:
+
+### General
+- **1s granularity** - the highest possible resolution for all metrics.
+- **Unlimited metrics** - collects all the available metrics, the more the better.
+- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
+- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](../database).
+- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
+- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
+- **Zero maintenance** - you just run it, it does the rest.
+- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
+- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoTs. Metrics are not centralized by default, so there is no limit.
+- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have different metrics retention policy and run with or without health monitoring. + +### Health Monitoring & Alarms +- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, multiple role-based notification methods. +- **Notifications**: [alerta.io](../health/notifications/alerta/), [amazon sns](../health/notifications/awssns/), [discordapp.com](../health/notifications/discord/), [email](../health/notifications/email/), [flock.com](../health/notifications/flock/), [irc](../health/notifications/irc/), [kavenegar.com](../health/notifications/kavenegar/), [messagebird.com](../health/notifications/messagebird/), [pagerduty.com](../health/notifications/pagerduty/), [prowl](../health/notifications/prowl/), [pushbullet.com](../health/notifications/pushbullet/), [pushover.net](../health/notifications/pushover/), [rocket.chat](../health/notifications/rocketchat/), [slack.com](../health/notifications/slack/), [smstools3](../health/notifications/smstools3/), [syslog](../health/notifications/syslog/), [telegram.org](../health/notifications/telegram/), [twilio.com](../health/notifications/twilio/), [web](../health/notifications/web/) and [custom notifications](../health/notifications/custom/). + +### Integrations +- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). + +## Visualization + +- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`. +- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware. +- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually. +- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](../web/gui/confluence/). +- **Customizable** - custom dashboards can be built using simple HTML (no javascript necessary). + +### Positive and negative values + +To improve clarity on charts, Netdata dashboards present **positive** values for metrics representing `read`, `input`, `inbound`, `received` and **negative** values for metrics representing `write`, `output`, `outbound`, `sent`. + +![positive-and-negative-values](https://user-images.githubusercontent.com/2662304/48309090-7c5c6180-e57a-11e8-8e03-3a7538c14223.gif) + +*Netdata charts showing the bandwidth and packets of a network interface. `received` is positive and `sent` is negative.* + +### Autoscaled y-axis + +Netdata charts automatically zoom vertically, to visualize the variation of each metric within the visible time-frame. 
+
+![non-zero-based](https://user-images.githubusercontent.com/2662304/48309139-3d2f1000-e57c-11e8-9a44-b91758134b00.gif)
+
+*A zero-based `stacked` chart automatically switches to an auto-scaled `area` chart when a single dimension is selected.*
+
+### Charts are synchronized
+
+Charts on Netdata dashboards are synchronized to each other. There is no master chart. Any chart can be panned or zoomed at any time, and all other charts will follow.
+
+![charts-are-synchronized](https://user-images.githubusercontent.com/2662304/48309003-b4fb3b80-e578-11e8-86f6-f505c7059c15.gif)
+
+*Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with `SHIFT` + `mouse wheel` while the mouse pointer is over a chart.*
+
+> The visible time-frame (pan and zoom) is propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
+
+### Highlighted time-frame
+
+To improve visual anomaly detection across charts, the user can highlight a time-frame (by pressing `ALT` + `mouse selection`) on all charts.
+
+![highlighted-timeframe](https://user-images.githubusercontent.com/2662304/48311876-f9093300-e5ae-11e8-9c74-e3e291741990.gif)
+
+*A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts.*
+
+> Highlighted ranges are propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
+
+
+## What does it monitor
+
+Netdata data collection is **extensible** - you can monitor anything you can get a metric for.
+Its [Plugin API](../collectors/plugins.d/) supports all programming languages (anything can be a Netdata plugin: BASH, python, perl, node.js, java, Go, ruby, etc).
+
+- For better performance, most system related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
+- For faster development and easier contributions, most application related plugins (databases, web servers, etc) have been written in `python`.
+
+#### APM (Application Performance Monitoring)
+- **[statsd](../collectors/statsd.plugin/)** - Netdata is a fully featured statsd server.
+- **[Go expvar](../collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
+- **[Spring Boot](../collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in the Spring Boot library.
+- **[uWSGI](../collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
+
+#### System Resources
+- **[CPU Utilization](../collectors/proc.plugin/)** - total and per core CPU usage.
+- **[Interrupts](../collectors/proc.plugin/)** - total and per core CPU interrupts.
+- **[SoftIRQs](../collectors/proc.plugin/)** - total and per core SoftIRQs.
+- **[SoftNet](../collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
+- **[CPU Throttling](../collectors/proc.plugin/)** - collects per core CPU throttling.
+- **[CPU Frequency](../collectors/proc.plugin/)** - collects the current CPU frequency.
+- **[CPU Idle](../collectors/proc.plugin/)** - collects the time spent per processor state.
+- **[IdleJitter](../collectors/idlejitter.plugin/)** - measures CPU latency.
+- **[Entropy](../collectors/proc.plugin/)** - the random numbers pool, used in cryptography.
+- **[Interprocess Communication - IPC](../collectors/proc.plugin/)** - such as semaphores and semaphores arrays. + +#### Memory +- **[ram](../collectors/proc.plugin/)** - collects info about RAM usage. +- **[swap](../collectors/proc.plugin/)** - collects info about swap memory usage. +- **[available memory](../collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes. +- **[committed memory](../collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes. +- **[Page Faults](../collectors/proc.plugin/)** - collects the system page faults (major and minor). +- **[writeback memory](../collectors/proc.plugin/)** - collects the system dirty memory and writeback activity. +- **[huge pages](../collectors/proc.plugin/)** - collects the amount of RAM used for huge pages. +- **[KSM](../collectors/proc.plugin/)** - collects info about Kernel Same Merging (memory dedupper). +- **[Numa](../collectors/proc.plugin/)** - collects Numa info on systems that support it. +- **[slab](../collectors/proc.plugin/)** - collects info about the Linux kernel memory usage. + +#### Disks +- **[block devices](../collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc. +- **[BCACHE](../collectors/proc.plugin/)** - detailed performance of SSD caching devices. +- **[DiskSpace](../collectors/proc.plugin/)** - monitors disk space usage. +- **[mdstat](../collectors/proc.plugin/)** - software RAID. +- **[hddtemp](../collectors/python.d.plugin/hddtemp/)** - disk temperatures. +- **[smartd](../collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values. +- **[device mapper](../collectors/proc.plugin/)** - naming disks. +- **[Veritas Volume Manager](../collectors/proc.plugin/)** - naming disks. +- **[megacli](../collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats. +- **[adaptec_raid](../collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics. +- **[ioping](../collectors/ioping.plugin/)** - to measure disk read/write latency. + +#### Filesystems +- **[BTRFS](../collectors/proc.plugin/)** - detailed disk space allocation and usage. +- **[Ceph](../collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc. +- **[NFS file servers and clients](../collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls +- **[Samba](../collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing. +- **[ZFS](../collectors/proc.plugin/)** - detailed performance and resource usage. + +#### Networking +- **[Network Stack](../collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops). +- **[Netfilter](../collectors/proc.plugin/)** - everything about the netfilter connection tracker. +- **[SynProxy](../collectors/proc.plugin/)** - collects performance data about the linux SYNPROXY (DDoS). +- **[NFacct](../collectors/nfacct.plugin/)** - collects accounting data from iptables. +- **[Network QoS](../collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time +- **[FPing](../collectors/fping.plugin/)** - to measure latency and packet loss between any number of hosts. +- **[ISC dhcpd](../collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc. 
+- **[AP](../collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`). +- **[SNMP](../collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these). +- **[port_check](../collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time. + +#### Virtual Private Networks +- **[OpenVPN](../collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel. +- **[LibreSwan](../collectors/charts.d.plugin/libreswan/)** - collects metrics per IPSEC tunnel. +- **[Tor](../collectors/python.d.plugin/tor/)** - collects Tor traffic statistics. + +#### Processes +- **[System Processes](../collectors/proc.plugin/)** - running, blocked, forks, active. +- **[Applications](../collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group. +- **[systemd](../collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS. + +#### Users +- **[Users and User Groups resource usage](../collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets +- **[logind](../collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected. + +#### Containers and VMs +- **[Containers](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc). +- **[libvirt VMs](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS. +- **[dockerd](../collectors/python.d.plugin/dockerd/)** - collects docker health metrics. + +#### Web Servers +- **[Apache and lighttpd](../collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers. +- **[IPFS](../collectors/python.d.plugin/ipfs/)** - bandwidth, peers. +- **[LiteSpeed](../collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics. +- **[Nginx](../collectors/python.d.plugin/nginx/)** - `stub-status`, for multiple servers. +- **[Nginx+](../collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics. +- **[PHP-FPM](../collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc. +- **[Tomcat](../collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc. +- **[web server `access.log` files](../collectors/python.d.plugin/web_log/)** - extracting in real-time, web server and proxy performance metrics and applying several health checks, etc. +- **[HTTP check](../collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content. + +#### Proxies, Balancers, Accelerators +- **[HAproxy](../collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc. +- **[Squid](../collectors/python.d.plugin/squid/)** - multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests. +- **[Traefik](../collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime). 
+- **[Varnish](../collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc. +- **[IPVS](../collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer. + +#### Database Servers +- **[CouchDB](../collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc. +- **[MemCached](../collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc. +- **[MongoDB](../collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc. +- **[MySQL and mariadb](../collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more. +- **[PostgreSQL](../collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more. +- **[Proxy SQL](../collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics. +- **[Redis](../collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves. +- **[RethinkDB](../collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics. + +#### Message Brokers +- **[beanstalkd](../collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring. +- **[RabbitMQ](../collectors/python.d.plugin/rabbitmq/)** - performance and health metrics. + +#### Search and Indexing +- **[ElasticSearch](../collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc. + +#### DNS Servers +- **[bind_rndc](../collectors/python.d.plugin/bind_rndc/)** - parses `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported. +- **[dnsdist](../collectors/python.d.plugin/dnsdist/)** - performance and health metrics. +- **[ISC Bind (named)](../collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics. All versions of bind after 9.9.10 are supported. +- **[NSD](../collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc. +- **[PowerDNS](../collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc. +- **[unbound](../collectors/python.d.plugin/unbound/)** - performance and resource usage metrics. +- **[dns_query_time](../collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics. + +#### Time Servers +- **[chrony](../collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time). +- **[ntpd](../collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optional also peer variables. + +#### Mail Servers +- **[Dovecot](../collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers. +- **[Exim](../collectors/python.d.plugin/exim/)** - message queue (emails queued). +- **[Postfix](../collectors/python.d.plugin/postfix/)** - message queue (entries, size). 
+
+#### Hardware Sensors
+- **[IPMI](../collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
+- **[lm-sensors](../collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
+- **[Nvidia](../collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
+- **[RPi](../collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
+- **[w1sensor](../collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
+
+#### UPSes
+- **[apcupsd](../collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[NUT](../collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[Linux Power Supply](../collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
+
+#### Social Sharing Servers
+- **[RetroShare](../collectors/python.d.plugin/retroshare/)** - connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
+
+#### Security
+- **[Fail2Ban](../collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
+
+#### Authentication, Authorization, Accounting (AAA, RADIUS, LDAP) Servers
+- **[FreeRadius](../collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
+
+#### Telephony Servers
+- **[opensips](../collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
+
+#### Household Appliances
+- **[SMA webbox](../collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[Fronius](../collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[StiebelEltron](../collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
+
+#### Game Servers
+- **[SpigotMC](../collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
+
+#### Distributed Computing
+- **[BOINC](../collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
+
+#### Media Streaming Servers
+- **[IceCast](../collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
+
+#### Monitoring Systems
+- **[Monit](../collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
+
+#### Provisioning Systems
+- **[Puppet](../collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
+
+You can easily extend Netdata by writing plugins that collect data from any source, using any computer language.
+
+## Community
+
+We welcome [contributions](../CONTRIBUTING.md). So, feel free to join the team.
+ +To report bugs, or get help, use [GitHub Issues](https://github.com/netdata/netdata/issues). + +You can also find Netdata on: + +- [Facebook](https://www.facebook.com/linuxnetdata/) +- [Twitter](https://twitter.com/linuxnetdata) +- [OpenHub](https://www.openhub.net/p/netdata) +- [Repology](https://repology.org/metapackage/netdata/versions) +- [StackShare](https://stackshare.io/netdata) + +## License + +Netdata is [GPLv3+](../LICENSE). + +Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](../REDISTRIBUTED.md).
\ No newline at end of file diff --git a/health/README.md b/health/README.md index 81cc043d..345f7fc7 100644 --- a/health/README.md +++ b/health/README.md @@ -65,7 +65,7 @@ This line starts an alarm or alarm template. alarm: NAME ``` -or +or ``` template: NAME @@ -161,7 +161,7 @@ The simple pattern syntax and operation is explained in [simple patterns](../lib This line makes a database lookup to find a value. This result of this lookup is available as `$this`. The format is: - + ``` lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS] ``` @@ -311,15 +311,15 @@ delay: [[[up U] [down D] multiplier M] max X] notification for this event will be sent 10 seconds after the actual event. This is used in hope the alarm will get back to its previous state within the duration given. The default `U` is zero. - + - `down D` defines the delay to be applied to a notification for an alarm that moves to lower state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m` will delay the notification by 1 minute. This is used to prevent notifications for flapping alarms. The default `D` is zero. - + - `mutliplier M` multiplies `U` and `D` when an alarm changes state, while a notification is delayed. The default multiplier is `1.0`. - + - `max X` defines the maximum absolute notification delay an alarm may get. The default `X` is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`). @@ -361,13 +361,13 @@ repeat: [off] [warning DURATION] [critical DURATION] #### Alarm line `option` -The only possible value for the `option` line is +The only possible value for the `option` line is ``` option: no-clear-notification ``` -For some alarms we need compare two time-frames, to detect anomalies. For example, `health.d/httpcheck.conf` has an alarm template called `web_service_slow` that compares the average http call response time over the last 3 minutes, compared to the average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time passes, the newest window moves into the older, so the average response time of the last hour will keep increasing. Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the issue was not resolved, it's just a matter of the newer data "polluting" the old. For such alarms, it's a good idea to tell Netdata to not clear the notification, by using the `no-clear-notification` option. +For some alarms we need compare two time-frames, to detect anomalies. For example, `health.d/httpcheck.conf` has an alarm template called `web_service_slow` that compares the average http call response time over the last 3 minutes, compared to the average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time passes, the newest window moves into the older, so the average response time of the last hour will keep increasing. Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the issue was not resolved, it's just a matter of the newer data "polluting" the old. 
For such alarms, it's a good idea to tell Netdata to not clear the notification, by using the `no-clear-notification` option. --- @@ -417,14 +417,14 @@ crit: $this > (($status == $CRITICAL) ? (85) : (95)) The above say: * If the alarm is currently a warning, then the threshold for being considered a warning is 75, otherwise it's 85. - + * If the alarm is currently critical, then the threshold for being considered critical is 85, otherwise it's 95. Which in turn, results in the following behavior: * While the value is rising, it will trigger a warning when it exceeds 85, and a critical alert when it exceeds 95. - + * While the value is falling, it will return to a warning state when it goes below 85, and a normal state when it goes below 75. @@ -442,13 +442,13 @@ Which in turn, results in the following behavior: You can find all the variables that can be used for a given chart, using `http://your.netdata.ip:19999/api/v1/alarm_variables?chart=CHART_NAME` Example: [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). - + _Hint: If you don't know how to find the CHART_NAME, you can read about it [here](../docs/Charts.md#charts)._ -Netdata supports 3 internal indexes for variables that will be used in health monitoring. +Netdata supports 3 internal indexes for variables that will be used in health monitoring. <details markdown="1"><summary>The variables below can be used in both chart alarms and context templates.</summary> -Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given contexts are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless if you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families). +Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given contexts are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless if you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families). </details> - **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of $this for the other configured alarms of the chart also appears, under the name of each configured alarm. @@ -478,13 +478,13 @@ Although the `alarm_variables` link shows you variables for a particular chart, - **special variables*** are: - `$this`, which is resolved to the value of the current alarm. - + - `$status`, which is resolved to the current status of the alarm (the current = the last status, i.e. before the current database lookup and the evaluation of the `calc` line). This values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`, `$WARNING`, `$CRITICAL`. These values are incremental, ie. `$status > $CLEAR` works as expected. - + - `$now`, which is resolved to current unix timestamp. 
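+
+As a quick, hedged illustration (the alarm name and threshold below are arbitrary, and `$last_collected_t` is the chart's last-collection timestamp variable), `$now` is typically used to detect stale data collection:
+
+```
+ alarm: cpu_last_collected_secs
+    on: system.cpu
+  calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+  warn: $this > 60
+```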
## Alarm Statuses @@ -493,16 +493,16 @@ Alarms can have the following statuses: - `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to netdata to reload health configuration) - + - `UNINITIALIZED` - the alarm is not initialized yet - + - `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed, a division by zero occurred, etc) - + - `CLEAR` - the alarm is not armed / raised (i.e. is OK) - + - `WARNING` - the warning expression resulted in true or non-zero - + - `CRITICAL` - the critical expression resulted in true or non-zero The external script will be called for all status changes. @@ -675,9 +675,6 @@ You can find how netdata interpreted the expressions by examining the alarm at ` ## Disabling health checks or silencing notifications at runtime -The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api). +It's currently not possible to schedule notifications from within the alarm template. For those scenarios where you need to temporarily disable notifications (for instance, when a running backup triggers a disk alert), you can disable or silence notifications at runtime. The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api). [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() - - - diff --git a/health/health.d/dbengine.conf b/health/health.d/dbengine.conf index 7a623ba2..956abf29 100644 --- a/health/health.d/dbengine.conf +++ b/health/health.d/dbengine.conf @@ -22,5 +22,5 @@ every: 10s crit: $this > 0 delay: down 1h multiplier 1.5 max 3h - info: number of IO errors dbengine came across the last 10 minutes (out of space, bad disk etc) + info: number of IO errors dbengine came across the last 10 minutes (CRC errors, out of space, bad disk etc) to: sysadmin diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in index 852718bc..bbb96091 100755 --- a/health/notifications/alarm-notify.sh.in +++ b/health/notifications/alarm-notify.sh.in @@ -352,6 +352,7 @@ SYSLOG_FACILITY= EMAIL_SENDER= EMAIL_CHARSET=$(locale charmap 2>/dev/null) EMAIL_THREADING= +EMAIL_PLAINTEXT_ONLY= # irc configs IRC_NICKNAME= @@ -2093,16 +2094,7 @@ SENT_SYSLOG=$? # ----------------------------------------------------------------------------- # send the email -send_email <<EOF -To: ${to_email} -Subject: ${host} ${status_message} - ${name//_/ } - ${chart} -MIME-Version: 1.0 -Content-Type: multipart/alternative; boundary="multipart-boundary" -${email_thread_headers} - -This is a MIME-encoded multipart message - ---multipart-boundary +IFS='' read -r -d '' email_plaintext_part <<EOF Content-Type: text/plain; encoding=${EMAIL_CHARSET} Content-Disposition: inline Content-Transfer-Encoding: 8bit @@ -2124,8 +2116,27 @@ Evaluated Expression : ${calc_expression} Expression Variables : ${calc_param_values} The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised. 
+EOF + +if [[ "${EMAIL_PLAINTEXT_ONLY}" == "YES" ]]; then + +send_email <<EOF +To: ${to_email} +Subject: ${host} ${status_message} - ${name//_/ } - ${chart} +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary="multipart-boundary" +${email_thread_headers} + +This is a MIME-encoded multipart message --multipart-boundary +${email_plaintext_part} +--multipart-boundary-- +EOF + +else + +IFS='' read -r -d '' email_html_part <<EOF Content-Type: text/html; encoding=${EMAIL_CHARSET} Content-Disposition: inline Content-Transfer-Encoding: 8bit @@ -2231,9 +2242,26 @@ Content-Transfer-Encoding: 8bit </table> </body> </html> +EOF + +send_email <<EOF +To: ${to_email} +Subject: ${host} ${status_message} - ${name//_/ } - ${chart} +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary="multipart-boundary" +${email_thread_headers} + +This is a MIME-encoded multipart message + +--multipart-boundary +${email_plaintext_part} +--multipart-boundary +${email_html_part} --multipart-boundary-- EOF +fi + SENT_EMAIL=$? # ----------------------------------------------------------------------------- diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf index 0c6897ff..60621df2 100755 --- a/health/notifications/health_alarm_notify.conf +++ b/health/notifications/health_alarm_notify.conf @@ -221,6 +221,11 @@ DEFAULT_RECIPIENT_EMAIL="root" # below if you want to disable it. #EMAIL_THREADING="NO" +# By default, netdata sends both HTML and Plain Text emails; some clients, +# such as command line clients, do not parse HTML emails. +# To make emails readable in these clients, you can configure netdata +# to send Plain Text only emails instead of HTML. +#EMAIL_PLAINTEXT_ONLY="YES" #------------------------------------------------------------------------------ # pushover (pushover.net) global notification options diff --git a/libnetdata/json/json.c b/libnetdata/json/json.c index c9ff39b0..7c5adca3 100644 --- a/libnetdata/json/json.c +++ b/libnetdata/json/json.c @@ -319,8 +319,8 @@ size_t json_walk_array(char *js, jsmntok_t *t, size_t nest, size_t start, JSON_E info("JSON: JSON walk_array ignoring element with name:%s fullname:%s",e->name, e->fullname); continue; } - sprintf(ne.name, "%s[%lu]", e->name, i); - sprintf(ne.fullname, "%s[%lu]", e->fullname, i); + snprintfz(ne.name, JSON_NAME_LEN, "%s[%lu]", e->name, i); + snprintfz(ne.fullname, JSON_FULLNAME_LEN, "%s[%lu]", e->fullname, i); switch(t[start].type) { case JSMN_PRIMITIVE: diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h index 43dc1e04..ef883300 100644 --- a/libnetdata/libnetdata.h +++ b/libnetdata/libnetdata.h @@ -81,6 +81,7 @@ #include <time.h> #include <unistd.h> #include <uuid/uuid.h> +#include <spawn.h> #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> @@ -312,5 +313,6 @@ extern char *netdata_configured_host_prefix; #include "url/url.h" #include "json/json.h" #include "health/health.h" +#include "string/utf8.h" #endif // NETDATA_LIB_H diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c index 91e22690..ca9a5aee 100644 --- a/libnetdata/locks/locks.c +++ b/libnetdata/locks/locks.c @@ -82,7 +82,8 @@ int __netdata_mutex_unlock(netdata_mutex_t *mutex) { return ret; } -int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_init_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; 
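The json.c hunk above swaps unbounded `sprintf()` calls for size-limited `snprintfz()` calls when building array element names, so an oversized `e->name` can no longer overflow the fixed-size `ne.name`/`ne.fullname` buffers. A minimal sketch of the same defensive pattern with standard C `snprintf()` (the buffer size is illustrative, not netdata's real `JSON_NAME_LEN`/`JSON_FULLNAME_LEN` values, and `snprintfz()` is assumed to behave like a NUL-terminating `snprintf()`):

```c
#include <stdio.h>

#define NAME_LEN 32   /* illustrative stand-in for JSON_NAME_LEN */

int main(void) {
    char name[NAME_LEN + 1];
    const char *base = "a_dimension_name_long_enough_to_overflow_a_small_buffer";
    unsigned long i = 7;

    /* snprintf() writes at most sizeof(name) bytes (including the NUL),
     * truncating the result instead of running past the end of name[],
     * which is exactly what the plain sprintf() calls risked. */
    snprintf(name, sizeof(name), "%s[%lu]", base, i);
    printf("%s\n", name);
    return 0;
}
```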
(void)start; @@ -98,7 +99,8 @@ int netdata_mutex_init_debug( const char *file, const char *function, const unsi return ret; } -int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_lock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -114,7 +116,8 @@ int netdata_mutex_lock_debug( const char *file, const char *function, const unsi return ret; } -int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_trylock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -130,7 +133,8 @@ int netdata_mutex_trylock_debug( const char *file, const char *function, const u return ret; } -int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { +int netdata_mutex_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_mutex_t *mutex) { usec_t start = 0; (void)start; @@ -219,7 +223,8 @@ int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) { } -int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -235,7 +240,8 @@ int netdata_rwlock_destroy_debug( const char *file, const char *function, const return ret; } -int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_init_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -251,7 +257,8 @@ int netdata_rwlock_init_debug( const char *file, const char *function, const uns return ret; } -int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -267,7 +274,8 @@ int netdata_rwlock_rdlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -283,7 +291,8 @@ int netdata_rwlock_wrlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -299,7 +308,8 
@@ int netdata_rwlock_unlock_debug( const char *file, const char *function, const u return ret; } -int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_tryrdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; @@ -315,7 +325,8 @@ int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, cons return ret; } -int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { +int netdata_rwlock_trywrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { usec_t start = 0; (void)start; diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c index 845363fd..177aebfc 100644 --- a/libnetdata/popen/popen.c +++ b/libnetdata/popen/popen.c @@ -45,110 +45,91 @@ static void mypopen_del(FILE *fp) { #define PIPE_READ 0 #define PIPE_WRITE 1 -FILE *mypopen(const char *command, volatile pid_t *pidptr) -{ - int pipefd[2]; - - if(pipe(pipefd) == -1) return NULL; +static inline FILE *custom_popene(const char *command, volatile pid_t *pidptr, char **env) { + FILE *fp; + int pipefd[2], error; + pid_t pid; + char *const spawn_argv[] = { + "sh", + "-c", + (char *)command, + NULL + }; + posix_spawnattr_t attr; + posix_spawn_file_actions_t fa; - int pid = fork(); - if(pid == -1) { - close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); + if(pipe(pipefd) == -1) return NULL; + if ((fp = fdopen(pipefd[PIPE_READ], "r")) == NULL) { + goto error_after_pipe; } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - /*mypopen_add(fp, pid);*/ - return(fp); - } - // the child - // close all files + // Mark all files to be closed by the exec() stage of posix_spawn() int i; for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) - if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); - - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); + if(i != STDIN_FILENO && i != STDERR_FILENO) + fcntl(i, F_SETFD, FD_CLOEXEC); + + if (!posix_spawn_file_actions_init(&fa)) { + // move the pipe to stdout in the child + if (posix_spawn_file_actions_adddup2(&fa, pipefd[PIPE_WRITE], STDOUT_FILENO)) { + error("posix_spawn_file_actions_adddup2() failed"); + goto error_after_posix_spawn_file_actions_init; + } + } else { + error("posix_spawn_file_actions_init() failed."); + goto error_after_pipe; } - -#ifdef DETACH_PLUGINS_FROM_NETDATA - // this was an attempt to detach the child and use the suspend mode charts.d - // unfortunatelly it does not work as expected. 
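The locks.c hunks above annotate the `file`, `function`, and `line` parameters with `__maybe_unused`, so builds that compile out the debug tracing no longer trip `-Wunused-parameter`. This diff does not show how netdata defines the macro, so the sketch below uses the common GCC/Clang idiom as an assumption:

```c
#include <stdio.h>

/* Assumed definition -- netdata's real one is not shown in this diff. */
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif

static int lock_trace(const char *file __maybe_unused,
                      const unsigned long line __maybe_unused) {
#ifdef LOCK_TRACING   /* hypothetical flag: only tracing builds read the params */
    fprintf(stderr, "lock taken at %s:%lu\n", file, line);
#endif
    return 0;         /* without the flag the params go unused, warning-free */
}

int main(void) {
    return lock_trace(__FILE__, (unsigned long)__LINE__);
}
```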
- - // fork again to become session leader - pid = fork(); - if(pid == -1) - error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid()); - - if(pid != 0) { - // the parent - exit(0); + if (!(error = posix_spawnattr_init(&attr))) { + // reset all signals in the child + sigset_t mask; + + if (posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF)) + error("posix_spawnattr_setflags() failed."); + sigemptyset(&mask); + if (posix_spawnattr_setsigmask(&attr, &mask)) + error("posix_spawnattr_setsigmask() failed."); + } else { + error("posix_spawnattr_init() failed."); } - - // set a new process group id for just this child - if( setpgid(0, 0) != 0 ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid()); - - if( getpgid(0) != getpid() ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. Expected %d, found %d", command, getpid(), getpid(), getpgid(0)); - - if( setsid() != 0 ) - error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid()); - - fprintf(stdout, "MYPID %d\n", getpid()); - fflush(NULL); -#endif - - // reset all signals - signals_unblock(); - signals_reset(); - - debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid()); - execl("/bin/sh", "sh", "-c", command, NULL); - exit(1); -} - -FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) { - int pipefd[2]; - - if(pipe(pipefd) == -1) - return NULL; - - int pid = fork(); - if(pid == -1) { + if (!posix_spawn(&pid, "/bin/sh", &fa, &attr, spawn_argv, env)) { + *pidptr = pid; + debug(D_CHILDS, "Spawned command: '%s' on pid %d from parent pid %d.", command, pid, getpid()); + } else { + error("Failed to spawn command: '%s' from parent pid %d.", command, getpid()); close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); - return NULL; + fp = NULL; } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - return(fp); + close(pipefd[PIPE_WRITE]); + + if (!error) { + // posix_spawnattr_init() succeeded + if (posix_spawnattr_destroy(&attr)) + error("posix_spawnattr_destroy"); } - // the child + if (posix_spawn_file_actions_destroy(&fa)) + error("posix_spawn_file_actions_destroy"); + + return fp; + +error_after_posix_spawn_file_actions_init: + if (posix_spawn_file_actions_destroy(&fa)) + error("posix_spawn_file_actions_destroy"); +error_after_pipe: + close(pipefd[PIPE_READ]); + close(pipefd[PIPE_WRITE]); + return NULL; +} - // close all files - int i; - for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) - if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); +// See man environ +extern char **environ; - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); - } +FILE *mypopen(const char *command, volatile pid_t *pidptr) { + return custom_popene(command, pidptr, environ); +} - execle("/bin/sh", "sh", "-c", command, NULL, env); - exit(1); +FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) { + return custom_popene(command, pidptr, env); } int mypclose(FILE *fp, pid_t pid) { diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c index dcbd3f65..ab324a16 100644 --- a/libnetdata/socket/security.c +++ b/libnetdata/socket/security.c @@ -7,8 +7,6 @@ SSL_CTX *netdata_client_ctx=NULL; SSL_CTX 
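The popen.c rewrite above keeps the public `mypopen()`/`mypopene()` signatures but spawns the child through `posix_spawn()` instead of `fork()` plus `exec()`, avoiding a full duplication of the (potentially large) netdata address space and marking inherited descriptors close-on-exec. A hypothetical caller, using only the signatures visible in this diff:

```c
#include <stdio.h>
#include <sys/types.h>

/* Declarations as implied by this diff (normally pulled in via popen.h). */
extern FILE *mypopen(const char *command, volatile pid_t *pidptr);
extern int mypclose(FILE *fp, pid_t pid);

int main(void) {
    volatile pid_t pid = 0;
    FILE *fp = mypopen("uname -a", &pid);
    if (!fp) return 1;                      /* pipe, fdopen, or spawn failure */

    char line[1024];
    while (fgets(line, sizeof(line), fp))   /* read the child's stdout */
        fputs(line, stdout);

    return mypclose(fp, pid);               /* close the pipe and reap the child */
}
```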
*netdata_srv_ctx=NULL; const char *security_key=NULL; const char *security_cert=NULL; -int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL; -int netdata_use_ssl_on_http = NETDATA_SSL_FORCE; //We force SSL due safety reasons int netdata_validate_server = NETDATA_SSL_VALID_CERTIFICATE; /** @@ -20,7 +18,7 @@ int netdata_validate_server = NETDATA_SSL_VALID_CERTIFICATE; * @param where the variable with the flags set. * @param ret the return of the caller */ -static void security_info_callback(const SSL *ssl, int where, int ret) { +static void security_info_callback(const SSL *ssl, int where, int ret __maybe_unused) { (void)ssl; if (where & SSL_CB_ALERT) { debug(D_WEB_CLIENT,"SSL INFO CALLBACK %s %s", SSL_alert_type_string(ret), SSL_alert_desc_string_long(ret)); @@ -166,7 +164,7 @@ void security_start_ssl(int selector) { switch (selector) { case NETDATA_SSL_CONTEXT_SERVER: { struct stat statbuf; - if (stat(security_key,&statbuf) || stat(security_cert,&statbuf)) { + if (stat(security_key, &statbuf) || stat(security_cert, &statbuf)) { info("To use encryption it is necessary to set \"ssl certificate\" and \"ssl key\" in [web] !\n"); return; } @@ -176,6 +174,9 @@ void security_start_ssl(int selector) { } case NETDATA_SSL_CONTEXT_STREAMING: { netdata_client_ctx = security_initialize_openssl_client(); + //This is necessary for the stream, because it sometimes works with a nonblocking socket. + //It returns the bitmask after the change; there is no description of errors in the documentation. + SSL_CTX_set_mode(netdata_client_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |SSL_MODE_AUTO_RETRY); break; } case NETDATA_SSL_CONTEXT_OPENTSDB: { @@ -185,6 +186,11 @@ void security_start_ssl(int selector) { } } +/** + * Clean Open SSL + * + * Clean all the allocated contexts from netdata. + */ void security_clean_openssl() { if (netdata_srv_ctx) { @@ -206,6 +212,17 @@ #endif } +/** + * Process accept + * + * Process the SSL handshake with the client in case it is necessary. + * + * @param ssl is a pointer for the SSL structure + * @param msg is a copy of the first 8 bytes of the initial message received + * + * @return it returns 0 in case it performs the handshake, 8 in case it is a clean connection, + * and another integer power of 2 otherwise. + */ int security_process_accept(SSL *ssl,int msg) { int sock = SSL_get_fd(ssl); int test; @@ -250,9 +267,18 @@ int security_process_accept(SSL *ssl,int msg) { debug(D_WEB_CLIENT_ACCESS,"SSL Handshake finished %s errno %d on socket fd %d", ERR_error_string((long)SSL_get_error(ssl, test), NULL), errno, sock); } - return 0; + return NETDATA_SSL_HANDSHAKE_COMPLETE; } +/** + * Test Certificate + * + * Check the certificate of the Netdata master + * + * @param ssl is the connection structure + * + * @return It returns 0 on success and -1 otherwise + */ int security_test_certificate(SSL *ssl) { X509* cert = SSL_get_peer_certificate(ssl); int ret; @@ -271,7 +297,48 @@ int security_test_certificate(SSL *ssl) { } else { ret = 0; } + return ret; } +/** + * Location for context + * + * In case the user gives us a directory with the available certificates and + * the Netdata master certificate, we use this function to validate the certificate. + * + * @param ctx the context where the path will be set. + * @param file the file with the Netdata master certificate. + * @param path the directory where the certificates are stored. + * + * @return It returns 0 on success and -1 otherwise. 
+ */ +int security_location_for_context(SSL_CTX *ctx, char *file, char *path) { + struct stat statbuf; + if (stat(file, &statbuf)) { + info("Netdata does not have a SSL master certificate, so it will use the default OpenSSL configuration to validate certificates!"); + return 0; + } + + ERR_clear_error(); + u_long err; + char buf[256]; + if(!SSL_CTX_load_verify_locations(ctx, file, path)) { + goto slfc; + } + + if(!SSL_CTX_set_default_verify_paths(ctx)) { + goto slfc; + } + + return 0; + +slfc: + while ((err = ERR_get_error()) != 0) { + ERR_error_string_n(err, buf, sizeof(buf)); + error("Cannot set the directory for the certificates and the master SSL certificate: %s",buf); + } + return -1; +} + #endif diff --git a/libnetdata/socket/security.h b/libnetdata/socket/security.h index 8beb9672..697e0fda 100644 --- a/libnetdata/socket/security.h +++ b/libnetdata/socket/security.h @@ -25,7 +25,7 @@ struct netdata_ssl{ SSL *conn; //SSL connection - int flags; + int flags; //The flags for SSL connection }; extern SSL_CTX *netdata_opentsdb_ctx; @@ -33,9 +33,8 @@ extern SSL_CTX *netdata_client_ctx; extern SSL_CTX *netdata_srv_ctx; extern const char *security_key; extern const char *security_cert; -extern int netdata_use_ssl_on_stream; -extern int netdata_use_ssl_on_http; extern int netdata_validate_server; +extern int security_location_for_context(SSL_CTX *ctx,char *file,char *path); void security_openssl_library(); void security_clean_openssl(); diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c index 28271008..22abb47f 100644 --- a/libnetdata/socket/socket.c +++ b/libnetdata/socket/socket.c @@ -301,39 +301,47 @@ void listen_sockets_close(LISTEN_SOCKETS *sockets) { sockets->failed = 0; } -WEB_CLIENT_ACL socket_ssl_acl(char *ssl) { +/* + * SSL ACL + * + * Search the SSL acl and apply it in case it is set. + * + * @param acl is the acl given by the user. 
+ */ +WEB_CLIENT_ACL socket_ssl_acl(char *acl) { + char *ssl = strchr(acl,'^'); + if(ssl) { + //Due to the format of the SSL command it is always the last command, + //we finish it here to avoid problems with the ACLs + *ssl = '\0'; #ifdef ENABLE_HTTPS - if (!strcmp(ssl,"optional")) { - netdata_use_ssl_on_http = NETDATA_SSL_OPTIONAL; - return WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; - } - else if (!strcmp(ssl,"force")) { - netdata_use_ssl_on_stream = NETDATA_SSL_FORCE; - return WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; - } + ssl++; + if (!strncmp("SSL=",ssl,4)) { + ssl += 4; + if (!strcmp(ssl,"optional")) { + return WEB_CLIENT_ACL_SSL_OPTIONAL; + } + else if (!strcmp(ssl,"force")) { + return WEB_CLIENT_ACL_SSL_FORCE; + } + } #endif + } return WEB_CLIENT_ACL_NONE; } WEB_CLIENT_ACL read_acl(char *st) { - char *ssl = strchr(st,'^'); - if (ssl) { - ssl++; - if (!strncmp("SSL=",ssl,4)) { - ssl += 4; - } - socket_ssl_acl(ssl); - } + WEB_CLIENT_ACL ret = socket_ssl_acl(st); - if (!strcmp(st,"dashboard")) return WEB_CLIENT_ACL_DASHBOARD; - if (!strcmp(st,"registry")) return WEB_CLIENT_ACL_REGISTRY; - if (!strcmp(st,"badges")) return WEB_CLIENT_ACL_BADGE; - if (!strcmp(st,"management")) return WEB_CLIENT_ACL_MGMT; - if (!strcmp(st,"streaming")) return WEB_CLIENT_ACL_STREAMING; - if (!strcmp(st,"netdata.conf")) return WEB_CLIENT_ACL_NETDATACONF; + if (!strcmp(st,"dashboard")) ret |= WEB_CLIENT_ACL_DASHBOARD; + if (!strcmp(st,"registry")) ret |= WEB_CLIENT_ACL_REGISTRY; + if (!strcmp(st,"badges")) ret |= WEB_CLIENT_ACL_BADGE; + if (!strcmp(st,"management")) ret |= WEB_CLIENT_ACL_MGMT; + if (!strcmp(st,"streaming")) ret |= WEB_CLIENT_ACL_STREAMING; + if (!strcmp(st,"netdata.conf")) ret |= WEB_CLIENT_ACL_NETDATACONF; - return socket_ssl_acl(st); + return ret; } static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) { @@ -375,7 +383,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, error("LISTENER: Cannot create unix socket '%s'", path); sockets->failed++; } else { - acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT; listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0, acl_flags); added++; } @@ -425,7 +433,13 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, } acl_flags |= read_acl(portconfig); } else { - acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT; + } + + //In case the user does not set the SSL option in the "bind to", but does have + //the certificates, we must redirect, so we assume the default option here + if(!(acl_flags & WEB_CLIENT_ACL_SSL_OPTIONAL) && !(acl_flags & WEB_CLIENT_ACL_SSL_FORCE)) { + acl_flags |= 
WEB_CLIENT_ACL_SSL_DEFAULT; } uint32_t scope_id = 0; diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h index 9ea83bcc..76b15def 100644 --- a/libnetdata/socket/socket.h +++ b/libnetdata/socket/socket.h @@ -17,7 +17,10 @@ typedef enum web_client_acl { WEB_CLIENT_ACL_BADGE = 1 << 2, WEB_CLIENT_ACL_MGMT = 1 << 3, WEB_CLIENT_ACL_STREAMING = 1 << 4, - WEB_CLIENT_ACL_NETDATACONF = 1 << 5 + WEB_CLIENT_ACL_NETDATACONF = 1 << 5, + WEB_CLIENT_ACL_SSL_OPTIONAL = 1 << 6, + WEB_CLIENT_ACL_SSL_FORCE = 1 << 7, + WEB_CLIENT_ACL_SSL_DEFAULT = 1 << 8 } WEB_CLIENT_ACL; #define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD) @@ -26,6 +29,9 @@ typedef enum web_client_acl { #define web_client_can_access_mgmt(w) ((w)->acl & WEB_CLIENT_ACL_MGMT) #define web_client_can_access_stream(w) ((w)->acl & WEB_CLIENT_ACL_STREAMING) #define web_client_can_access_netdataconf(w) ((w)->acl & WEB_CLIENT_ACL_NETDATACONF) +#define web_client_is_using_ssl_optional(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_OPTIONAL) +#define web_client_is_using_ssl_force(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_FORCE) +#define web_client_is_using_ssl_default(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_DEFAULT) typedef struct listen_sockets { struct config *config; // the config file to use diff --git a/libnetdata/string/utf8.h b/libnetdata/string/utf8.h new file mode 100644 index 00000000..133ec710 --- /dev/null +++ b/libnetdata/string/utf8.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STRING_UTF8_H +#define NETDATA_STRING_UTF8_H 1 + +#define IS_UTF8_BYTE(x) (x & 0x80) +#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x)&&(x & 0x40)) + +#endif /* NETDATA_STRING_UTF8_H */ diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c index 07a9f806..7df9faaf 100644 --- a/libnetdata/url/url.c +++ b/libnetdata/url/url.c @@ -43,8 +43,16 @@ char *url_encode(char *str) { return pbuf; } -/* Returns a url-decoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ +/** + * URL Decode + * + * Returns a url-decoded version of str + * IMPORTANT: be sure to free() the returned string after use + * + * @param str the string that will be decoded + * + * @return a pointer to the decoded url. + */ char *url_decode(char *str) { size_t size = strlen(str) + 1; @@ -52,6 +60,149 @@ char *url_decode(char *str) { return url_decode_r(buf, str, size); } +/** + * Percentage escape decode + * + * Decode a %XX character or return 0 if it cannot be decoded + * + * @param s the string to decode + * + * @return The character decoded on success and 0 otherwise + */ +char url_percent_escape_decode(char *s) { + if(likely(s[1] && s[2])) + return from_hex(s[1]) << 4 | from_hex(s[2]); + return 0; +} + +/** + * Get byte length + * + * This (utf8 string related) should be moved to a separate file in the future + * + * @param c is the utf8 character + * + * @return It returns the length of the specific character. 
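With the socket.h change above, the SSL policy rides in the same `WEB_CLIENT_ACL` bitmask as the per-port permissions, which is why `read_acl()` can now simply OR the result of `socket_ssl_acl()` into the other flags. A small sketch of how the bits combine (enum values copied from the diff; the first two bit positions are inferred from the surrounding context, not shown in the hunk):

```c
#include <stdio.h>

typedef enum web_client_acl {
    WEB_CLIENT_ACL_NONE         = 0,
    WEB_CLIENT_ACL_DASHBOARD    = 1 << 0,   /* inferred; not shown in the hunk */
    WEB_CLIENT_ACL_REGISTRY     = 1 << 1,   /* inferred; not shown in the hunk */
    WEB_CLIENT_ACL_BADGE        = 1 << 2,
    WEB_CLIENT_ACL_MGMT         = 1 << 3,
    WEB_CLIENT_ACL_STREAMING    = 1 << 4,
    WEB_CLIENT_ACL_NETDATACONF  = 1 << 5,
    WEB_CLIENT_ACL_SSL_OPTIONAL = 1 << 6,
    WEB_CLIENT_ACL_SSL_FORCE    = 1 << 7,
    WEB_CLIENT_ACL_SSL_DEFAULT  = 1 << 8
} WEB_CLIENT_ACL;

int main(void) {
    /* A "bind to" entry like "dashboard^SSL=force" ends up OR-ing two bits. */
    WEB_CLIENT_ACL port_acl = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_SSL_FORCE;

    if (port_acl & WEB_CLIENT_ACL_SSL_FORCE)
        printf("plain HTTP clients on this port should be redirected to HTTPS\n");
    if (!(port_acl & WEB_CLIENT_ACL_STREAMING))
        printf("streaming is not permitted on this port\n");
    return 0;
}
```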
+ */ +char url_utf8_get_byte_length(char c) { + if(!IS_UTF8_BYTE(c)) + return 1; + + char length = 0; + while(likely(c & 0x80)) { + length++; + c <<= 1; + } + //4 bytes is the max size for a UTF-8 char + //10XX XXXX is not a valid first character -> check length == 1 + if(length > 4 || length == 1) + return -1; + + return length; +} + +/** + * Decode Multibyte UTF8 + * + * Decode % encoded UTF-8 characters and copy them to *d + * + * @param s first address + * @param d the destination buffer + * @param d_end last address + * + * @return count of bytes written to *d + */ +char url_decode_multibyte_utf8(char *s, char *d, char *d_end) { + char first_byte = url_percent_escape_decode(s); + + if(unlikely(!first_byte || !IS_UTF8_STARTBYTE(first_byte))) + return 0; + + char byte_length = url_utf8_get_byte_length(first_byte); + + if(unlikely(byte_length <= 0 || d+byte_length >= d_end)) + return 0; + + char to_read = byte_length; + while(to_read > 0) { + char c = url_percent_escape_decode(s); + + if(unlikely( !IS_UTF8_BYTE(c) )) + return 0; + if((to_read != byte_length) && IS_UTF8_STARTBYTE(c)) + return 0; + + *d++ = c; + s+=3; + to_read--; + } + + return byte_length; +} + +/* + * The utf8_check() function scans the '\0'-terminated string starting + * at s. It returns a pointer to the first byte of the first malformed + * or overlong UTF-8 sequence found, or NULL if the string contains + * only correct UTF-8. It also spots UTF-8 sequences that could cause + * trouble if converted to UTF-16, namely surrogate characters + * (U+D800..U+DFFF) and non-Unicode positions (U+FFFE..U+FFFF). This + * routine is very likely to find a malformed sequence if the input + * uses any other encoding than UTF-8. It therefore can be used as a + * very effective heuristic for distinguishing between UTF-8 and other + * encodings. + * + * Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> -- 2005-03-30 + * License: http://www.cl.cam.ac.uk/~mgk25/short-license.html + */ +unsigned char *utf8_check(unsigned char *s) +{ + while (*s) + { + if (*s < 0x80) + /* 0xxxxxxx */ + s++; + else if ((s[0] & 0xe0) == 0xc0) + { + /* 110XXXXx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[0] & 0xfe) == 0xc0) /* overlong? */ + return s; + else + s += 2; + } + else if ((s[0] & 0xf0) == 0xe0) + { + /* 1110XXXX 10Xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */ + (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */ + (s[0] == 0xef && s[1] == 0xbf && + (s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */ + return s; + else + s += 3; + } + else if ((s[0] & 0xf8) == 0xf0) + { + /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[3] & 0xc0) != 0x80 || + (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */ + (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? 
*/ + return s; + else + s += 4; + } + else + return s; + } + + return NULL; +} + char *url_decode_r(char *to, char *url, size_t size) { char *s = url, // source *d = to, // destination @@ -59,12 +210,24 @@ while(*s && d < e) { if(unlikely(*s == '%')) { - if(likely(s[1] && s[2])) { - char t = from_hex(s[1]) << 4 | from_hex(s[2]); + char t = url_percent_escape_decode(s); + if(IS_UTF8_BYTE(t)) { + char bytes_written = url_decode_multibyte_utf8(s, d, e); + if(likely(bytes_written)){ + d += bytes_written; + s += (bytes_written * 3)-1; + } + else { + goto fail_cleanup; + } + } + else if(likely(t) && isprint(t)) { // avoid HTTP header injection - *d++ = (char)((isprint(t))? t : ' '); + *d++ = t; s += 2; } + else + goto fail_cleanup; } else if(unlikely(*s == '+')) *d++ = ' '; @@ -77,5 +240,166 @@ *d = '\0'; + if(unlikely( utf8_check((unsigned char *)to) )) //NULL means success here + return NULL; + return to; + +fail_cleanup: + *d = '\0'; + return NULL; +} + +/** + * Is request complete? + * + * Check whether the request is complete. + * This function cannot check all the request METHODS; for example, in case you are working with POST, it will fail. + * + * @param begin is the first character of the sequence to analyse. + * @param end is the last character of the sequence + * @param length is the total number of bytes read; it is not the difference between end and begin. + * + * @return It returns 1 when the request is complete and 0 otherwise. + */ +inline int url_is_request_complete(char *begin, char *end, size_t length) { + + if ( begin == end) { + //Message cannot be complete when first and last address are the same + return 0; + } + + //This math verifies the last part is valid, because we are discarding the POST + if (length > 4) { + begin = end - 4; + } + + return (strstr(begin, "\r\n\r\n"))?1:0; +} + +/** + * Find protocol + * + * Search for the string ' HTTP/' in the message given. + * + * @param s is the start of the user request. + * @return a pointer to the ' HTTP/' sequence when found, or to the end of the string otherwise. + */ +inline char *url_find_protocol(char *s) { + while(*s) { + // find the next space + while (*s && *s != ' ') s++; + + // is it SPACE + "HTTP/" ? + if(*s && !strncmp(s, " HTTP/", 6)) break; + else s++; + } + + return s; +} + +/** + * Map query string + * + * Map the query string fields that will be decoded. + * This function must be called after checking the presence of query strings; + * here we are assuming that you already tested this. + * + * @param out the pointer to pointers that will be used to map + * @param url the input url that we are decoding. + * + * @return It returns the total number of variables in the query string. + */ +int url_map_query_string(char **out, char *url) { + (void)out; + (void)url; + int count = 0; + + //First we try to parse considering that there was no URL encoding process + char *moveme = url; + char *ptr; + + //We always have at least one here, so we can set this. 
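The reworked decoder above is stricter than the old one: percent escapes go through `url_percent_escape_decode()`, multi-byte sequences go through the UTF-8 helpers, and any malformed escape, invalid UTF-8, or non-printable single-byte result now makes `url_decode_r()` return `NULL` instead of silently emitting a mangled string. A hypothetical caller, based on the `url_decode_r(to, url, size)` signature (the decoded output is never longer than the input, so an equally sized buffer suffices):

```c
#include <stdio.h>
#include <stddef.h>

extern char *url_decode_r(char *to, char *url, size_t size);

int main(void) {
    char in[]  = "alpha%20%CE%B1%2Bbeta";  /* %CE%B1 is the UTF-8 Greek alpha */
    char out[sizeof(in)];

    if (url_decode_r(out, in, sizeof(out)))
        printf("decoded: %s\n", out);      /* expected: "alpha α+beta" */
    else
        printf("rejected: malformed escape or invalid UTF-8\n");
    return 0;
}
```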
+ out[count++] = moveme; + while(moveme) { + ptr = strchr((moveme+1), '&'); + if(ptr) { + out[count++] = ptr; + } + + moveme = ptr; + } + + //We could not find any '&', so we now assume it is encoded like '%26' + if (count == 1) { + moveme = url; + while(moveme) { + ptr = strchr((moveme+1), '%'); + if(ptr) { + char *test = (ptr+1); + if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) { + out[count++] = ptr; + } + } + moveme = ptr; + } + } + + return count; +} + +/** + * Parse query string + * + * Parse the mapped query string and store it inside output. + * + * @param output is a vector where the string will be stored. + * @param max is the maximum length of the output + * @param map the map built by the function url_map_query_string. + * @param total the total number of variables inside map + * + * @return It returns 0 on success and -1 otherwise + */ +int url_parse_query_string(char *output, size_t max, char **map, int total) { + if(!total) { + return 0; + } + + int counter, next; + size_t length; + char *end; + char *begin = map[0]; + char save; + size_t copied = 0; + for(counter = 0, next=1 ; next <= total ; ++counter, ++next) { + if (next != total) { + end = map[next]; + length = (size_t) (end - begin); + save = *end; + *end = 0x00; + } else { + length = strlen(begin); + end = NULL; + } + length++; + + if (length > (max - copied)) { + error("Parsing query string: we cannot parse a query string so big"); + break; + } + + if(!url_decode_r(output, begin, length)) { + return -1; + } + length = strlen(output); + copied += length; + output += length; + + begin = end; + if (begin) { + *begin = save; + } + } + + return 0; } diff --git a/libnetdata/url/url.h b/libnetdata/url/url.h index 6cef6d7a..10f3fe17 100644 --- a/libnetdata/url/url.h +++ b/libnetdata/url/url.h @@ -25,4 +25,11 @@ extern char *url_decode(char *str); extern char *url_decode_r(char *to, char *url, size_t size); +#define WEB_FIELDS_MAX 400 +extern int url_map_query_string(char **out, char *url); +extern int url_parse_query_string(char *output, size_t max, char **map, int total); + +extern int url_is_request_complete(char *begin,char *end,size_t length); +extern char *url_find_protocol(char *s); + #endif /* NETDATA_URL_H */ diff --git a/netdata-installer.sh b/netdata-installer.sh index a0c3f828..a79f41c1 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -199,6 +199,7 @@ NETDATA_PREFIX= LIBS_ARE_HERE=0 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}" RELEASE_CHANNEL="nightly" +IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY:-"no"}" while [ -n "${1}" ]; do case "${1}" in "--zlib-is-really-here") LIBS_ARE_HERE=1;; @@ -984,8 +985,13 @@ fi # ----------------------------------------------------------------------------- progress "Copy uninstaller" -sed "s|ENVIRONMENT_FILE=\"/etc/netdata/.environment\"|ENVIRONMENT_FILE=\"${NETDATA_PREFIX}/etc/netdata/.environment\"|" packaging/installer/netdata-uninstaller.sh > ${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh -chmod 750 ${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh +if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh ]; then + echo >&2 "Removing uninstaller from old location" + rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh; +fi + +sed "s|ENVIRONMENT_FILE=\"/etc/netdata/.environment\"|ENVIRONMENT_FILE=\"${NETDATA_PREFIX}/etc/netdata/.environment\"|" packaging/installer/netdata-uninstaller.sh > ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh +chmod 750 ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh # 
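Tying the new url.h entry points together: `url_map_query_string()` records where each field of the query string starts (falling back to `%26`-style scanning when no literal `&` is found), and `url_parse_query_string()` then decodes the mapped fields into a single output buffer. A hypothetical caller under the declared signatures, with `WEB_FIELDS_MAX` bounding the map as in url.h above:

```c
#include <stdio.h>
#include <stddef.h>

#define WEB_FIELDS_MAX 400   /* from url.h above */

extern int url_map_query_string(char **out, char *url);
extern int url_parse_query_string(char *output, size_t max, char **map, int total);

int main(void) {
    char query[] = "chart=system.cpu&after=-600&dimensions=user%7Csystem";
    char *map[WEB_FIELDS_MAX];
    char parsed[1024];

    int total = url_map_query_string(map, query);   /* one entry per field */
    if (url_parse_query_string(parsed, sizeof(parsed), map, total) == -1) {
        fprintf(stderr, "malformed query string\n");
        return 1;
    }
    printf("%s\n", parsed);  /* fields decoded in place, %7C becomes '|' */
    return 0;
}
```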
----------------------------------------------------------------------------- progress "Basic netdata instructions" @@ -1006,19 +1012,24 @@ To start netdata run: ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_START_CMD}${TPUT_RESET} END -echo >&2 "Uninstall script copied to: ${TPUT_RED}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh${TPUT_RESET}" +echo >&2 "Uninstall script copied to: ${TPUT_RED}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh${TPUT_RESET}" echo >&2 progress "Install netdata updater tool" +if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh ]; then + echo >&2 "Removing updater from previous location" + rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh +fi + if [ -f "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" ]; then - sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" || exit 1 + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || exit 1 else - sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" || exit 1 + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || exit 1 fi -chmod 0755 ${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh -echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh${TPUT_RESET}" +chmod 0755 ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh +echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh${TPUT_RESET}" echo >&2 # Figure out the cron directory for the distro @@ -1042,7 +1053,7 @@ else echo >&2 "Adding to cron" rm -f "${crondir}/netdata-updater" - ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata-updater.sh" "${crondir}/netdata-updater" + ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" "${crondir}/netdata-updater" echo >&2 "Auto-updating has been enabled. 
Updater script linked to: ${TPUT_RED}${TPUT_BOLD}${crondir}/netdata-update${TPUT_RESET}" echo >&2 @@ -1061,7 +1072,10 @@ else fi fi +progress "Wrap up environment set up" + # Save environment variables +echo >&2 "Preparing .environment file" cat <<EOF > "${NETDATA_USER_CONFIG_DIR}/.environment" # Created by installer PATH="${PATH}" @@ -1073,10 +1087,14 @@ INSTALL_UID="${UID}" NETDATA_GROUP="${NETDATA_GROUP}" REINSTALL_COMMAND="${REINSTALL_COMMAND}" RELEASE_CHANNEL="${RELEASE_CHANNEL}" -# This value is meant to be populated by autoupdater (if enabled) -NETDATA_TARBALL_CHECKSUM="new_installation" +IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY}" +NETDATA_LIB_DIR="${NETDATA_LIB_DIR}" EOF +echo >&2 "Setting netdata.tarball.checksum to 'new_installation'" +cat <<EOF > "${NETDATA_LIB_DIR}/netdata.tarball.checksum" +new_installation +EOF # ----------------------------------------------------------------------------- echo >&2 diff --git a/netdata.spec.in b/netdata.spec.in index 25b5f9a4..d686906f 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -92,6 +92,7 @@ URL: http://my-netdata.io # Build dependencies # BuildRequires: gcc +BuildRequires: gcc-c++ BuildRequires: make BuildRequires: git BuildRequires: autoconf @@ -190,6 +191,26 @@ BuildRequires: cups-devel Requires: cups # end - cups plugin dependencies +# Prometheus remote write dependencies +BuildRequires: snappy-devel +BuildRequires: protobuf-devel +%if 0%{?suse_version} +BuildRequires: libprotobuf-c-devel +%else +BuildRequires: protobuf-c-devel +%endif + +%if 0%{?suse_version} +Requires: libsnappy1 +Requires: protobuf-c +Requires: libprotobuf15 +%else +Requires: snappy +Requires: protobuf-c +Requires: protobuf +%endif +# end - prometheus remote write dependencies + # ##################################################################### # End of dependency management configuration # ##################################################################### diff --git a/packaging/DISTRIBUTIONS.md b/packaging/DISTRIBUTIONS.md new file mode 100644 index 00000000..d180f25f --- /dev/null +++ b/packaging/DISTRIBUTIONS.md @@ -0,0 +1,37 @@ +# Netdata distribution support matrix +![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true) + +In the following table we've listed Netdata's officially supported operating systems. We detail the distributions, flavors, and the level of support Netdata is currently capable of providing. + +The following table is a work in progress. We have concluded on the list of distributions +that we are currently supporting, and we are working on documenting our current state so that our users +have complete visibility over the range of support. 
+ +Distribution | Family | Architecture | Code health | Installer support | Kickstart support | Binary packaging support | Integrity testing (CI) | Functionality testing (CI) | Community support +:------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :-------------------- +14.04.6 LTS (Trusty Tahr) | Ubuntu | | | | | | | | +16.04.6 LTS (Xenial Xerus) | Ubuntu | | | | | | | | +18.04.2 LTS (Bionic Beaver) | Ubuntu | | | | | | | | +19.04 (Disco Dingo) Latest | Ubuntu | | | | | | | | +Debian 7 (Wheezy) | Debian | | | | | | | | +Debian 8 (Jessie) | Debian | | | | | | | | +Debian 9 (Stretch) | Debian | | | | | | | | +Debian 10 (Buster) | Debian | | | | | | | | +Versions 6.* | RHEL | | | | | | | | +Versions 7.* | RHEL | | | | | | | | +Versions 8.* | RHEL | | | | | | | | +Fedora 28 | Fedora | | | | | | | | +Fedora 29 | Fedora | | | | | | | | +Fedora 30 | Fedora | | | | | | | | +Fedora 31 | Fedora | | | | | | | | +CentOS 6.* | Cent OS | | | | | | | | | +CentOS 7.* | Cent OS | | | | | | | | | +CentOS 8.* | Cent OS | | | | | | | | | +Open SuSE Leap 15.0 | Open SuSE | | | | | | | | +Open SuSE Leap 15.1 | Open SuSE | | | | | | | | +Open SuSE Tumbleweed (latest) | Open SuSE | | | | | | | | +SuSE Enterprise Linux Server 11 | SLES | | | | | | | | +SuSE Enterprise Linux Server 12 | SLES | | | | | | | | +SuSE Enterprise Linux Server 15 | SLES | | | | | | | | +Arch Linux (latest) | Arch | | | | | | | | +All other linux | Other | | | | | | | | diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 98fdce5c..4be2d93b 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -58,9 +58,11 @@ COPY --from=builder /app / # Configure system ARG NETDATA_UID=201 ARG NETDATA_GID=201 +ENV DOCKER_GRP netdata +ENV DOCKER_USR netdata RUN \ # provide judy installation to base image - apk add make alpine-sdk && \ + apk add make alpine-sdk shadow && \ cd /judy-${JUDY_VER} && make install && cd / && \ # Clean the source stuff once judy is installed rm -rf /judy-${JUDY_VER} && apk del make alpine-sdk && \ @@ -69,8 +71,8 @@ RUN \ chmod 4755 /usr/local/bin/fping && \ mkdir -p /var/log/netdata && \ # Add netdata user - addgroup -g ${NETDATA_GID} -S netdata && \ - adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G netdata netdata && \ + addgroup -g ${NETDATA_GID} -S "${DOCKER_GRP}" && \ + adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G "${DOCKER_GRP}" "${DOCKER_USR}" && \ # Apply the permissions as described in # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories chown -R root:netdata /etc/netdata && \ diff --git a/packaging/docker/README.md b/packaging/docker/README.md index 0bf416cd..4e21918e 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -24,9 +24,10 @@ This is good for an internal network or to quickly analyse a host. 
```bash docker run -d --name=netdata \ -p 19999:19999 \ + -v /etc/passwd:/host/etc/passwd:ro \ + -v /etc/group:/host/etc/group:ro \ -v /proc:/host/proc:ro \ -v /sys:/host/sys:ro \ - -v /var/run/docker.sock:/var/run/docker.sock:ro \ --cap-add SYS_PTRACE \ --security-opt apparmor=unconfined \ netdata/netdata @@ -47,35 +48,57 @@ services: security_opt: - apparmor:unconfined volumes: + - /etc/passwd:/host/etc/passwd:ro + - /etc/group:/host/etc/group:ro - /proc:/host/proc:ro - /sys:/host/sys:ro - - /var/run/docker.sock:/var/run/docker.sock:ro ``` +If you don't want to use the apps.plugin functionality, you can remove the mounts of `/etc/passwd` and `/etc/group` (they are used to get proper user and group names for the monitored host) to get slightly better security. + ### Docker container names resolution -If you want to have your container names resolved by netdata, you need to do two things: -1) Make netdata user be part of the group that owns the socket. - To achieve that just add environment variable `PGID=[GROUP NUMBER]` to the netdata container, - where `[GROUP NUMBER]` is practically the group id of the group assigned to the docker socket, on your host. - This group number can be found by running the following (if socket group ownership is docker): - ```bash - grep docker /etc/group | cut -d ':' -f 3 - ``` - -2) Change docker socket access level to read/write like so: - from - ``` - /var/run/docker.sock:/var/run/docker.sock:ro - ``` - - change to - ``` - /var/run/docker.sock:/var/run/docker.sock:rw - ``` +There are a few options for resolving container names within netdata. Some methods of doing so will allow root access to your machine from within the container. Please read the following carefully. + +#### Docker Socket Proxy (Safest Option) + +Deploy a Docker socket proxy that accepts and filters out requests using something like [HAProxy](https://docs.netdata.cloud/docs/running-behind-haproxy/) so that it restricts connections to read-only access to the CONTAINERS endpoint. + +The reason it's safer to expose the socket to the proxy is that netdata has a TCP port exposed outside the Docker network. Access to the proxy container is limited to within the network. + +#### Giving group access to Docker Socket (Less safe) **Important Note**: You should seriously consider the necessity of activating this option, -as it grants to the netdata user access to the privileged socket connection of docker service and therefore your whole machine. +as it grants the netdata user access to the privileged socket connection of the docker service, and therefore to your whole machine. + +If you want to have your container names resolved by Netdata, make the `netdata` user be part of the group that owns the socket. + +To achieve that just add environment variable `PGID=[GROUP NUMBER]` to the Netdata container, +where `[GROUP NUMBER]` is practically the group id of the group assigned to the docker socket, on your host. + +This group number can be found by running the following (if socket group ownership is docker): + +```bash +grep docker /etc/group | cut -d ':' -f 3 +``` + +#### Running as root (Unsafe) + +**Important Note**: You should seriously consider the necessity of activating this option, +as it grants the netdata user access to the privileged socket connection of the docker service, and therefore to your whole machine. + +```yaml +version: '3' +services: + netdata: + image: netdata/netdata + # ... rest of your config ... + volumes: + # ... other volumes ... 
+ - /var/run/docker.sock:/var/run/docker.sock:ro + environment: + - DOCKER_USR=root +``` ### Pass command line options to Netdata @@ -132,6 +155,8 @@ services: security_opt: - apparmor:unconfined volumes: + - /etc/passwd:/host/etc/passwd:ro + - /etc/group:/host/etc/group:ro - /proc:/host/proc:ro - /sys:/host/sys:ro - /var/run/docker.sock:/var/run/docker.sock:ro diff --git a/packaging/docker/build-test.sh b/packaging/docker/build-test.sh index a7e31d4f..3c55e173 100755 --- a/packaging/docker/build-test.sh +++ b/packaging/docker/build-test.sh @@ -46,27 +46,29 @@ do esac done -if [ -n "${REPOSITORY}" ] && [ -n "${VERSION}" ] && [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then +if [ -n "${REPOSITORY}" ]; then if [ $DOBUILD -eq 1 ] ; then - echo "Building ${VERSION} of ${REPOSITORY} container" + echo "Building ${VERSION:-latest} of ${REPOSITORY} container" docker run --rm --privileged multiarch/qemu-user-static:register --reset # Build images using multi-arch Dockerfile. - eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION}" --file packaging/docker/Dockerfile ./ + eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION:-latest}" --file packaging/docker/Dockerfile ./ # Create temporary docker CLI config with experimental features enabled (manifests v2 need it) mkdir -p /tmp/docker #echo '{"experimental":"enabled"}' > /tmp/docker/config.json fi - # Login to docker hub to allow futher operations - echo "Logging into docker" - echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin + if [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then + # Login to docker hub to allow further operations + echo "Logging into docker" + echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin - echo "Pushing ${REPOSITORY}:${VERSION}" - docker --config /tmp/docker push "${REPOSITORY}:${VERSION}" + echo "Pushing ${REPOSITORY}:${VERSION}" + docker --config /tmp/docker push "${REPOSITORY}:${VERSION}" + fi else - echo "Missing parameter. REPOSITORY=${REPOSITORY} VERSION=${VERSION} DOCKER_USERNAME=${DOCKER_USERNAME} DOCKER_PWD=${DOCKER_PWD}" + echo "Missing parameter. REPOSITORY=${REPOSITORY}" printhelp exit 1 fi diff --git a/packaging/docker/publish.sh b/packaging/docker/publish.sh index fd1883af..5a9e67ed 100755 --- a/packaging/docker/publish.sh +++ b/packaging/docker/publish.sh @@ -39,10 +39,6 @@ if [ ! 
-z ${DEVEL+x} ]; then declare -a ARCHS=(${DEVEL_ARCHS[@]}) fi -echo "Syncing repository with latest changes (We may have updated with package versions)" -git checkout master -git pull - # Ensure there is a version, the most appropriate one if [ "${VERSION}" == "" ]; then VERSION=$(git tag --points-at) diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh index 2b5047cd..f4377d45 100755 --- a/packaging/docker/run.sh +++ b/packaging/docker/run.sh @@ -9,41 +9,15 @@ set -e echo "Netdata entrypoint script starting" if [ ${RESCRAMBLE+x} ]; then - echo "Reinstalling all packages to get the latest Polymorphic Linux scramble" - apk upgrade --update-cache --available + echo "Reinstalling all packages to get the latest Polymorphic Linux scramble" + apk upgrade --update-cache --available fi -create_group_and_assign_to_user() { - local local_DOCKER_GROUP="$1" - local local_DOCKER_GID="$2" - local local_DOCKER_USR="$3" - - echo >&2 "Adding group with ID ${local_DOCKER_GID} and name '${local_DOCKER_GROUP}'" - addgroup -g "${local_DOCKER_GID}" "${local_DOCKER_GROUP}" || echo >&2 "Could not add group ${local_DOCKER_GROUP} with ID ${local_DOCKER_GID}, its already there probably" - - echo >&2 "Adding user '${local_DOCKER_USR}' to group '${local_DOCKER_GROUP}/${local_DOCKER_GID}'" - sed -i "s/:${local_DOCKER_GID}:$/:${local_DOCKER_GID}:${local_DOCKER_USR}/g" /etc/group - - # Make sure we use the right docker group - GRP_TO_ASSIGN="$(grep ":x:${local_DOCKER_GID}:" /etc/group | cut -d':' -f1)" - if [ -z "${GRP_TO_ASSIGN}" ]; then - echo >&2 "Could not find group ID ${local_DOCKER_GID} in /etc/group. Check your logs and report it if this is an unrecovereable error" - else - echo >&2 "Group creation and assignment completed, netdata was assigned to group ${GRP_TO_ASSIGN}/${local_DOCKER_GID}" - echo "${GRP_TO_ASSIGN}" - fi -} - -DOCKER_USR="netdata" -DOCKER_SOCKET="/var/run/docker.sock" -DOCKER_GROUP="docker" - -if [ -S "${DOCKER_SOCKET}" ] && [ -n "${PGID}" ]; then - GRP=$(create_group_and_assign_to_user "${DOCKER_GROUP}" "${PGID}" "${DOCKER_USR}") - if [ -n "${GRP}" ]; then - echo "Adjusting ownership of mapped docker socket '${DOCKER_SOCKET}' to root:${GRP}" - chown "root:${GRP}" "${DOCKER_SOCKET}" || echo "Failed to change ownership on docker socket, container name resolution might not work" - fi +if [ -n "${PGID}" ]; then + echo "Creating docker group ${PGID}" + addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, its already there probably" + echo "Assign netdata user to docker group ${PGID}" + usermod -a -G ${PGID} ${DOCKER_USR} || echo >&2 "Could not add netdata user to group docker with ID ${PGID}" fi exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_PORT}" "$@" diff --git a/packaging/installer/README.md b/packaging/installer/README.md index b10ffa05..67a7a912 100644 --- a/packaging/installer/README.md +++ b/packaging/installer/README.md @@ -6,7 +6,6 @@ The best way to install Netdata is directly from source. Our **automatic install !!! warning You can find Netdata packages distributed by third parties. In many cases, these packages are either too old or broken. So, the suggested ways to install Netdata are the ones in this page. - **We are currently working to provide our binary packages for all Linux distros.** Stay tuned... 1. [Automatic one line installation](#one-line-installation), easy installation from source, **this is the default** 2. 
[Install pre-built static binary on any 64bit Linux](#linux-64bit-pre-built-static-binary) @@ -17,6 +16,7 @@ The best way to install Netdata is directly from source. Our **automatic install 7. [Enable on FreeNAS Corral](#freenas) 8. [Install on macOS (OS X)](#macos) 9. [Install on a Kubernetes cluster](https://github.com/netdata/helmchart#netdata-helm-chart-for-kubernetes-deployments) +10. [Install using binary packages](#binary-packages) See also the list of Netdata [package maintainers](../maintainers) for ASUSTOR NAS, OpenWRT, ReadyNAS, etc. @@ -24,26 +24,27 @@ Note: From Netdata v1.12 and above, anonymous usage information is collected by --- -## One line installation +## One-line installation +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) -> This method is **fully automatic on all Linux** distributions. FreeBSD and MacOS systems need some preparations before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information. +This method is **fully automatic on all Linux distributions**. FreeBSD and MacOS systems need some preparations before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information. -To install Netdata from source and keep it up to date automatically, run the following: +To install Netdata from source, and keep it up to date with our **nightly releases** automatically, run the following: -```bash -bash <(curl -Ss https://my-netdata.io/kickstart.sh) +``` bash +$ bash <(curl -Ss https://my-netdata.io/kickstart.sh) ``` -*(do not `sudo` this command, it will do it by itself as needed)* +!!! note + Do not use `sudo` for the one-line installer—it will escalate privileges itself if needed. -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) + To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](#nightly-vs-stable-releases). -<details markdown="1"><summary>Click here for more information and advanced use of this command.</summary> +<details markdown="1"><summary>Click here for more information and advanced use of the one-line installation script.</summary> - <br/> Verify the integrity of the script with this: -```bash +``` bash [ "8a2b054081a108dff915994ce77f2f2d" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` *It should print `OK, VALID` if the script is the one we ship.* @@ -56,48 +57,47 @@ The `kickstart.sh` script: - installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated daily (you will get a message from cron only if the update fails). - For QA purposes, this installation method lets us know if it succeed or failed. 
-The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to change the installation directory, enable/disable plugins, etc (check below). +The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters: -For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata. -You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds. +- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. +- `--dont-start-it`: Prevent the installer from starting Netdata automatically. +- `--stable-channel`: Automatically update only on the release of new major versions. +- `--no-updates`: Prevent automatic updates of any kind. -Example: +Example using all the above parameters: ```bash - bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it --stable-channel +$ bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it --no-updates --stable-channel ``` -If you don't want to receive automatic updates, add `--no-updates` when executing `kickstart.sh` script. - -</details> <br/> +</details> Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md). --- ## Linux 64bit pre-built static binary +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0) -You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system -(even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). -You can also use these packages on systems with broken or unsupported package managers. +You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system (even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). You can also use these packages on systems with broken or unsupported package managers. -To install Netdata with a binary package on any Linux distro, any kernel version - for **Intel/AMD 64bit** hosts, run the following: +To install Netdata from a binary package on any Linux distro and any kernel version on **Intel/AMD 64bit** systems, and keep it up to date with our **nightly releases** automatically, run the following: ```bash - - bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) - +$ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) ``` -*(do not `sudo` this command, it will do it by itself as needed; if the target system does not have `bash` installed, see below for instructions to run it without `bash`)* +!!! note + Do not use `sudo` for this installer—it will escalate privileges itself if needed. 
-![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0)
+ To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](README.md#nightly-vs-stable-releases).
-> The static builds install Netdata at **`/opt/netdata`**
+ If your system does not have `bash` installed, open the `More information and advanced uses of the kickstart-static64.sh script` dropdown for instructions to run the installer without `bash`.
+
+ This script installs Netdata at `/opt/netdata`.
<details markdown="1"><summary>Click here for more information and advanced use of this command.</summary>
- <br/>
Verify the integrity of the script with this:
```bash
@@ -106,15 +106,17 @@ Verify the integrity of the script with this:
*It should print `OK, VALID` if the script is the one we ship.*
-For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata.
-You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds.
+The `kickstart-static64.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters:
-Example:
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--no-updates`: Prevent automatic updates of any kind.
-```bash
-
- bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it --stable-channel
+Example using all the above parameters:
+```bash
+$ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it --no-updates --stable-channel
```
If your shell fails to handle the above one-liner, do this:
@@ -135,7 +137,7 @@ sh /tmp/kickstart-static64.sh
- The same files can be used for updates too.
- For QA purposes, this installation method lets us know if it succeeded or failed.
-</details> <br/>
+</details>
Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md).
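Whichever installer you use, a quick smoke test can confirm the agent is running. This sketch assumes a local install listening on the default port `19999`; `api/v1/info` is the same endpoint exercised by the test scripts later in this release:

```bash
# the agent should answer with a JSON description of itself
curl -sS http://localhost:19999/api/v1/info
```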
@@ -205,16 +207,16 @@ This is how to do it by hand:
```sh
# Debian / Ubuntu
-apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl
+apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl python
# Fedora
-dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
+dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python
# CentOS / Red Hat Enterprise Linux
-yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel
+yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel make nc pkgconfig python zlib-devel
# openSUSE
-zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
+zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python
```
@@ -293,6 +295,26 @@ To apply the changes you made, you have to restart Netdata.
---
+### Binary Packages
+![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
+
+We provide our own flavour of binary packages, in the .RPM and .DEB packaging formats, for the most common operating systems.
+
+We currently release packages in the .RPM format, starting with version [1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0).
+We plan to release packages in the .DEB format with version [1.17.0](https://github.com/netdata/netdata/releases/tag/v1.17.0).
+Early adopters may experiment with our .DEB formatted packages using our nightly releases. Our current packaging infrastructure provider is [Package Cloud](https://packagecloud.io).
+
+Netdata is committed to supporting installation on all operating systems. This is an ongoing effort, as we strive to automate and make things easier for our users. For the operating system support matrix, please visit our [distributions](../../packaging/DISTRIBUTIONS.md) support page.
+
+We provide two separate repositories, one for our stable releases and one for our nightly releases.
+
+1. Stable releases: Our stable production releases are hosted in the [netdata/netdata](https://packagecloud.io/netdata/netdata) repository on Package Cloud.
+2. Nightly releases: Our latest releases are hosted in the [netdata/netdata-edge](https://packagecloud.io/netdata/netdata-edge) repository on Package Cloud.
+
+Visit the repository pages and follow the quick set-up instructions to get started.
+
+---
+
## Other Systems
@@ -448,4 +470,38 @@ Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as a
[ -x /etc/rc.netdata ] && /etc/rc.netdata start
```
+
+## Nightly vs. stable releases
+
+The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration.
+
+**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws, or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when we're ready, we simply change the release tags on GitHub. That means nightly releases are stable and proven to function correctly in the vast majority of Netdata use cases. That's why nightly is the *best choice for most Netdata users*.
+
+**Stable**: We create stable releases whenever we believe the code has reached a major milestone. Most often, stable releases correlate with the introduction of new, significant features. Stable releases might be a better choice for those who run Netdata in *mission-critical production systems*, as updates will come less frequently, and only after the community helps fix any bugs that might have been introduced in previous releases.
+
+**Pros of using nightly releases:**
+
+ - Get the latest features and bugfixes as soon as they're available
+ - Receive security-related fixes immediately
+ - Use stable, fully-tested code that's always improving
+ - Leverage the same Netdata experience our community is using
+
+**Pros of using stable releases:**
+
+ - Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation
+ - Retain more control over the Netdata version you use
+
+
+## Automatic updates
+
+By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels.
+
+If you would prefer to manually update your Netdata agent, you can disable automatic updates by using the `--no-updates` option when you install or update Netdata using the [one-line installation script](#one-line-installation).
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
+```
+
+With automatic updates disabled, you can choose exactly when and how you [update Netdata](UPDATE.md).
+
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
index 765184d3..43d03b00 100644
--- a/packaging/installer/UNINSTALL.md
+++ b/packaging/installer/UNINSTALL.md
@@ -16,7 +16,7 @@ NETDATA_ADDED_TO_GROUPS="<additional groups>" # Additional groups for a user ru
```
3. Run `netdata-uninstaller.sh` as follows
```
-${NETDATA_PREFIX}/usr/libexec/netdata-uninstaller.sh --yes --env <environment_file>
+${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env <environment_file>
```
Note: Existing installations may still need to download the file if it's not present.
diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh
index 0bbdaac2..41ada623 100755
--- a/packaging/installer/netdata-uninstaller.sh
+++ b/packaging/installer/netdata-uninstaller.sh
@@ -2,14 +2,15 @@
#shellcheck disable=SC2181
#
# This is the netdata uninstaller script
+#
# Variables needed by script and taken from '.environment' file:
# - NETDATA_PREFIX
# - NETDATA_ADDED_TO_GROUPS
#
# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
#
-# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
-#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything
diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh
index 83031f3a..6609edd5 100755
--- a/packaging/installer/netdata-updater.sh
+++ b/packaging/installer/netdata-updater.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
#shellcheck disable=SC2164
-
-# this script will uninstall netdata
-
+#
+# Netdata updater utility
+#
# Variables needed by script:
# - PATH
# - CFLAGS
@@ -11,6 +11,12 @@
# - NETDATA_TARBALL_URL
# - NETDATA_TARBALL_CHECKSUM_URL
# - NETDATA_TARBALL_CHECKSUM
+# - NETDATA_PREFIX / NETDATA_LIB_DIR (After 1.16.1 we will only depend on lib dir)
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
info() {
echo >&3 "$(date) : INFO: " "${@}"
}
@@ -67,21 +73,26 @@ download() {
}
set_tarball_urls() {
+ local extension="tar.gz"
if [ ! -z "${NETDATA_LOCAL_TARBAL_OVERRIDE}" ]; then
info "Not fetching remote tarballs, local override was given"
return
fi
+ if [ "$2" == "yes" ]; then
+ extension="gz.run"
+ fi
+
if [ "$1" = "stable" ]; then
local latest
# Simple version
# latest="$(curl -sSL https://api.github.com/repos/netdata/netdata/releases/latest | grep tag_name | cut -d'"' -f4)"
latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)"
- export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.tar.gz"
+ export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.${extension}"
export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt"
else
- export NETDATA_TARBALL_URL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.tar.gz"
+ export NETDATA_TARBALL_URL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.${extension}"
export NETDATA_TARBALL_CHECKSUM_URL="https://storage.googleapis.com/netdata-nightlies/sha256sums.txt"
fi
}
@@ -131,10 +142,12 @@ update() {
info "Re-installing netdata..."
eval "${REINSTALL_COMMAND} --dont-wait ${do_not_start}" >&3 2>&3 || fatal "FAILED TO COMPILE/INSTALL NETDATA"
+
+ # We no longer store checksum info here, but leave this so that we clean up all environment files upon the next update.
sed -i '/NETDATA_TARBALL/d' "${ENVIRONMENT_FILE}"
- cat <<EOF >>"${ENVIRONMENT_FILE}"
-NETDATA_TARBALL_CHECKSUM="$NEW_CHECKSUM"
-EOF
+
+ info "Updating tarball checksum info"
+ echo "${NEW_CHECKSUM}" > "${NETDATA_LIB_DIR}/netdata.tarball.checksum"
fi
rm -rf "${tmpdir}" >&3 2>&3
@@ -148,6 +161,12 @@ EOF
# shellcheck source=/dev/null
source "${ENVIRONMENT_FILE}" || exit 1
+# We don't expect to find the lib dir variable on older installations, so fall back to this path if none is found
+export NETDATA_LIB_DIR="${NETDATA_LIB_DIR:-${NETDATA_PREFIX}/var/lib/netdata}"
+
+# Source the tarball checksum, if not already available from environment (for existing installations with the old logic)
+[[ -z "${NETDATA_TARBALL_CHECKSUM}" ]] && [[ -f ${NETDATA_LIB_DIR}/netdata.tarball.checksum ]] && NETDATA_TARBALL_CHECKSUM="$(cat "${NETDATA_LIB_DIR}/netdata.tarball.checksum")"
+
if [ "${INSTALL_UID}" != "$(id -u)" ]; then
fatal "You are running this script as user with uid $(id -u). We recommend running this script as root (user with uid 0)"
fi
@@ -165,7 +184,34 @@ else
exec 3>"${logfile}"
fi
-set_tarball_urls "${RELEASE_CHANNEL}"
+set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}"
+
+if [ "${IS_NETDATA_STATIC_BINARY}" == "yes" ]; then
+ TMPDIR="$(create_tmp_directory)"
+ PREVDIR="$(pwd)"
+
+ echo >&2 "Entering ${TMPDIR}"
+ cd "${TMPDIR}"
+
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${TMPDIR}/sha256sum.txt"
+ download "${NETDATA_TARBALL_URL}" "${TMPDIR}/netdata-latest.gz.run"
+ if ! grep netdata-latest.gz.run "${TMPDIR}/sha256sum.txt" | safe_sha256sum -c - >/dev/null 2>&1; then
+ fatal "Static binary checksum validation failed. Stopping netdata installation and leaving binary in ${TMPDIR}"
+ fi
+
+ # Do not pass any options other than --accept, for now
+ sh "${TMPDIR}/netdata-latest.gz.run" --accept
+
+ #shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ rm -r "${TMPDIR}"
+ else
+ echo >&2 "NOTE: did not remove: ${TMPDIR}"
+ fi
+ echo >&2 "Switching back to ${PREVDIR}"
+ cd "${PREVDIR}"
+else
+ # the installer updates this script - so we run and exit in a single line
+ update && exit 0
+fi
-# the installer updates this script - so we run and exit in a single line
-update && exit 0
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
index 71ea0f63..80fba315 100755
--- a/packaging/makeself/jobs/70-netdata-git.install.sh
+++ b/packaging/makeself/jobs/70-netdata-git.install.sh
@@ -13,6 +13,10 @@ else
# export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness"
fi
+# We export this as 'yes'; the installer stores it in .environment.
+# The updater consumes it, so that it can tell whether it should update a static install or a non-static one
+export IS_NETDATA_STATIC_BINARY="yes"
+
run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \
--dont-wait \
--dont-start-it \
diff --git a/packaging/version b/packaging/version
index a406138e..e17963c5 100644
--- a/packaging/version
+++ b/packaging/version
@@ -1 +1 @@
-v1.16.0
+v1.16.1
diff --git a/streaming/README.md b/streaming/README.md
index 3e58f1f0..1bfbb236 100644
--- a/streaming/README.md
+++ b/streaming/README.md
@@ -138,7 +138,7 @@ headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not
proxy with db|not `none`|not `none`|`yes`|possible|possible|yes
central netdata|not `none`|not `none`|`no`|possible|possible|yes
-For the options to encrypt the data stream between the slave and the master, refer to [securing the communication](#securing-the-communication)
+For the options to encrypt the data stream between the slave and the master, refer to [securing streaming communications](#securing-streaming-communications)
##### options for the receiving node
@@ -213,40 +213,91 @@ The receiving end (`proxy` or `master`) logs entries like these:
For netdata v1.9+, streaming can also be monitored via `access.log`.
-### Securing the communication
+### Securing streaming communications
-Netdata does not activate TLS encryption by default. To encrypt the connection, you first need to [enable TLS support](../web/server/#enabling-tls-support) on the master. With encryption enabled on the receiving side, we need to instruct the slave to use SSL as well. On the slave's `stream.conf`, configure the destination as follows:
+Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS support](../web/server/#enabling-tls-support) on the master.
With encryption enabled on the receiving side, you need to instruct the slave to use TLS/SSL as well. On the slave's `stream.conf`, configure the destination as follows:
```
[stream]
destination = host:port:SSL
```
-The word SSL appended to the end of the destination tells the slave that the connection must be encrypted.
+The word `SSL` appended to the end of the destination tells the slave that connections must be encrypted.
+
+??? info "Differences in TLS and SSL terminology"
+ While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`.
#### Certificate verification
-When SSL is enabled on the slave, the default behavior will be do not connect with the master unless the server's certificate can be verified via the default chain. In case you want to avoid this check, add to the slave's `stream.conf` the following:
+When TLS/SSL is enabled on the slave, the default behavior will be to not connect with the master unless the server's certificate can be verified via the default chain. In case you want to avoid this check, add the following to the slave's `stream.conf` file:
```
[stream]
ssl skip certificate verification = yes
```
+#### Trusted certificate
+
+If you've enabled [certificate verification](#certificate-verification), you might see errors from the OpenSSL library when there's a problem with checking the certificate chain (`X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY`). More importantly, OpenSSL will reject self-signed certificates.
+
+Given these known issues, if you trust your certificate, you can set the options `CApath` and `CAfile` to tell Netdata where your certificates and the trusted certificate file are stored.
+
+For more details about these options, you can read about [verify locations](https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html).
+
+Before you change your streaming configuration, you need to copy your trusted certificate to your slave system and add the certificate to OpenSSL's list.
+
+On most Linux distributions, the `update-ca-certificates` command searches inside the `/usr/share/ca-certificates` directory for certificates. You should double-check by reading the `update-ca-certificates` manual (`man update-ca-certificates`), and then change the directory in the below commands if needed.
+
+If you have `sudo` configured on your slave system, you can use that to run the following commands. If not, you'll have to log in as `root` to complete them.
+
+```
+# mkdir /usr/share/ca-certificates/netdata
+# cp master_cert.pem /usr/share/ca-certificates/netdata/master_cert.crt
+# chown -R netdata.netdata /usr/share/ca-certificates/netdata/
+```
+
+First, you create a new directory to store your certificates for Netdata. Next, you need to change the extension on your certificate from `.pem` to `.crt` so it's compatible with `update-ca-certificates`. Finally, you need to change permissions so the user that runs Netdata can access the directory where you copied your certificate.
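If you only need a certificate to experiment with, you can generate a self-signed key/certificate pair the same way this release's ACL test harness does; this is a sketch for testing only, and the subject fields are placeholders:

```
openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Example/L=Somewhere/O=Example/CN=master.example.com" -keyout key.pem -out master_cert.pem
```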
+
+Next, edit the file `/etc/ca-certificates.conf` and add the following line:
+
+```
+netdata/master_cert.crt
+```
+
+Now update the list of certificates by running the following, again either with `sudo` or as `root`:
+
+```
+# update-ca-certificates
+```
+
+!!! note
+ Some Linux distributions have different methods of updating the certificate list. For more details, please read this guide on [adding trusted root certificates](https://github.com/Busindre/How-to-Add-trusted-root-certificates).
+
+Once you update your certificate list, you can set the stream parameters for Netdata to trust the master certificate. Open `stream.conf` for editing and change the following lines:
+
+```
+[stream]
+ CApath = /etc/ssl/certs/
+ CAfile = /etc/ssl/certs/master_cert.pem
+```
+
+With this configuration, the `CApath` option tells Netdata to search for trusted certificates inside `/etc/ssl/certs`. The `CAfile` option specifies that the Netdata master certificate is located at `/etc/ssl/certs/master_cert.pem`. This way, you can skip using the system's entire list of certificates and use Netdata's master certificate instead.
+
#### Expected behaviors
-With the introduction of SSL, the master-slave communication behaves as shown in the table below, depending on the following configurations:
-- Master TLS (Yes/No): Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
-- Master port SSL (-/force/optional): Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming.
-- Slave TLS (Yes/No): Whether the destination in the slave's `stream.conf` has `:SSL` at the end.
-- Slave SSL Verification (yes/no): Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no).
+With the introduction of TLS/SSL, the master-slave communication behaves as shown in the table below, depending on the following configurations:
+
+- **Master TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
+- **Master port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming.
+- **Slave TLS (Yes/No)**: Whether the destination in the slave's `stream.conf` has `:SSL` at the end.
+- **Slave TLS Verification (yes/no)**: Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no).
Master TLS enabled | Master port SSL | Slave TLS | Slave SSL Ver. | Behavior
:------:|:-----:|:-----:|:-----:|:--------
No | - | No | no | Legacy behavior. The master-slave stream is unencrypted.
Yes | force | No | no | The master rejects the slave connection.
Yes | -/optional | No | no | The master-slave stream is unencrypted (expected situation for legacy slaves and newer masters)
-Yes | -/force/optional | Yes | no | The master-slave stream is encrypted, provided that the master has a valid SSL certificate. Otherwise, the slave refuses to connect.
+Yes | -/force/optional | Yes | no | The master-slave stream is encrypted, provided that the master has a valid TLS/SSL certificate. Otherwise, the slave refuses to connect.
## Viewing remote host dashboards, using mirrored databases diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c index 954b1d7d..59913c24 100644 --- a/streaming/rrdpush.c +++ b/streaming/rrdpush.c @@ -48,6 +48,11 @@ unsigned int default_rrdpush_enabled = 0; char *default_rrdpush_destination = NULL; char *default_rrdpush_api_key = NULL; char *default_rrdpush_send_charts_matching = NULL; +#ifdef ENABLE_HTTPS +int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL; +char *netdata_ssl_ca_path = NULL; +char *netdata_ssl_ca_file = NULL; +#endif static void load_stream_conf() { errno = 0; @@ -89,13 +94,17 @@ int rrdpush_init() { } } } + char *invalid_certificate = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", "no"); if ( !strcmp(invalid_certificate,"yes")){ if (netdata_validate_server == NETDATA_SSL_VALID_CERTIFICATE){ - info("The Netdata is configured to accept invalid certificate."); + info("Netdata is configured to accept invalid SSL certificate."); netdata_validate_server = NETDATA_SSL_INVALID_CERTIFICATE; } } + + netdata_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", "/etc/ssl/certs/"); + netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", "/etc/ssl/certs/certs.pem"); #endif return default_rrdpush_enabled; @@ -652,6 +661,7 @@ void *rrdpush_sender_thread(void *ptr) { #ifdef ENABLE_HTTPS if (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE ){ security_start_ssl(NETDATA_SSL_CONTEXT_STREAMING); + security_location_for_context(netdata_client_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path); } #endif @@ -801,7 +811,17 @@ void *rrdpush_sender_thread(void *ptr) { rrdpush_buffer_lock(host); debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_sender_buffer)); - ssize_t ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); + ssize_t ret; +#ifdef ENABLE_HTTPS + SSL *conn = host->ssl.conn ; + if(conn && !host->ssl.flags) { + ret = SSL_write(conn,&host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin); + } else { + ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); + } +#else + ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); +#endif if (unlikely(ret == -1)) { if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) { debug(D_STREAM, "STREAM: Send failed - closing socket..."); @@ -1059,6 +1079,8 @@ static int rrdpush_receive(int fd info("STREAM %s [receive from [%s]:%s]: initializing communication...", host->hostname, client_ip, client_port); #ifdef ENABLE_HTTPS + host->ssl.conn = ssl->conn; + host->ssl.flags = ssl->flags; if(send_timeout(ssl,fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { #else if(send_timeout(fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { diff --git a/streaming/stream.conf b/streaming/stream.conf index 0d360cc2..fdff1f25 100644 --- a/streaming/stream.conf +++ b/streaming/stream.conf @@ -41,6 +41,22 @@ # #ssl skip certificate verification = yes + # Certificate Authority Path + # + # OpenSSL has a default directory where the known certificates are stored, + # case it is necessary 
it is possible to change this rule using the variable + # "CApath" + # + #CApath = /etc/ssl/certs/ + + # Certificate Authority file + # + # When the Netdata master has certificate, that is not recognized as valid, + # we can add this certificate in the list of known certificates in CApath + # and give for Netdata as argument. + # + #CAfile = /etc/ssl/certs/cert.pem + # The API_KEY to use (as the sender) api key = diff --git a/tests/Makefile.am b/tests/Makefile.am index b0f65456..92e6db0f 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -5,6 +5,11 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in CLEANFILES = \ health_mgmtapi/health-cmdapi-test.sh \ +<<<<<<< HEAD + acls/acl.sh \ +======= + urls/request.sh \ +>>>>>>> 63a4cadd346df71255d2350128eebcf317e81d0f $(NULL) include $(top_srcdir)/build/subst.inc @@ -22,10 +27,14 @@ dist_noinst_DATA = \ node.d/fronius.process.spec.js \ node.d/fronius.validation.spec.js \ health_mgmtapi/health-cmdapi-test.sh.in \ + acls/acl.sh.in \ + urls/request.sh.in \ $(NULL) dist_plugins_SCRIPTS = \ health_mgmtapi/health-cmdapi-test.sh \ + acls/acl.sh \ + urls/request.sh \ $(NULL) dist_noinst_SCRIPTS = \ diff --git a/tests/acls/acl.sh.in b/tests/acls/acl.sh.in new file mode 100644 index 00000000..772d6640 --- /dev/null +++ b/tests/acls/acl.sh.in @@ -0,0 +1,119 @@ +#!/bin/bash -x +# SPDX-License-Identifier: GPL-3.0-or-later + +BASICURL="http://127.0.0.1" +BASICURLS="https://127.0.0.1" + +NETDATA_VARLIB_DIR="/var/lib/netdata" +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;43m' + +#change the previous acl file and with a new +#and store it on a new file +change_file(){ + sed "s/$1/$2/g" netdata.cfg > "$4" +} + +change_ssl_file(){ + KEYROW="ssl key = $3/key.pem" + CERTROW="ssl certificate = $3/cert.pem" + sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp + sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2 + sed "s/$1/$2/g" tmp2 > "$4" +} + +run_acl_tests() { + #Give a time for netdata start properly + sleep 2 + + curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt + curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt + curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt + curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt + curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt + + TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1) + if [ "$TOT" -ne "$4" ]; then + echo -e "${RED}I got a wrong number of redirects($TOT) when SSL is activated, It was expected $4" + rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt + killall netdata + exit 1 + elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then + echo -e "${YELLOW}I got the correct number of redirects($4) when SSL is activated and I try to access with HTTP." + return + fi + + TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1) + if [ "$TOT" -ne "$3" ]; then + echo -e "${RED}I got a wrong number of \"200 OK\" from the queries, it was expected $3." 
+ killall netdata
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ exit 1
+ fi
+
+ echo -e "${GREEN}ACLs were applied correctly"
+}
+
+CONF=$(grep "bind" netdata.cfg)
+MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//')
+
+openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem
+chown "$MUSER" key.pem cert.pem
+CWD=$(pwd)
+
+if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+else
+ TOKEN="NULL"
+fi
+
+change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0"
+netdata -c "netdata.conf.test0"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1"
+netdata -c "netdata.conf.test1"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2"
+netdata -c "netdata.conf.test2"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3"
+netdata -c "netdata.conf.test3"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+killall netdata
+
+rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+echo "All the tests were successful"
diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg
new file mode 100644
index 00000000..1dcb4a5c
--- /dev/null
+++ b/tests/acls/netdata.cfg
@@ -0,0 +1,20 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg
new file mode 100644
index 00000000..28e0030d
--- /dev/null
+++ b/tests/acls/netdata.ssl.cfg
@@ -0,0 +1,24 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
+
+[web]
+ ssl key =
+ ssl certificate =
diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in
new file mode 100644
index 00000000..fac00bc4
--- /dev/null
+++ b/tests/urls/request.sh.in
@@ -0,0 +1,303 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################################
+#### ####
+#### GLOBAL VARIABLES ####
+#### ####
+################################################################################################
+
+# The current time
+CT=$(date +'%s')
+
+# The previous time
+PT=$((CT - 30))
+
+# The output directories where we will store the results and errors
+OUTDIR="tests"
+OUTEDIR="encoded_tests"
+OUTOPTDIR="options"
+ERRDIR="etests"
+
+################################################################################################
+#### ####
+#### FUNCTIONS ####
+#### ####
+################################################################################################
+
+# Print error message and close script
+netdata_print_error(){
+ echo "Closing due to error \"$1\" code \"$2\""
+ exit 1
+}
+
+# Print the header message of the function
+netdata_print_header() {
+ echo "$1"
+}
+
+# Create the main directory where the results will be stored
+netdata_create_directory() {
+ netdata_print_header "Creating directory $1"
+ if [ ! -d "$1" ]; then
+ mkdir "$1"
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Cannot create directory $?"
+ fi
+ else
+ echo "Working with existing directory $1"
+ fi
+}
+
+#Check that the download succeeded
+netdata_test_download(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Could not download the page $2" $?
+ exit 1
+ fi
+}
+
+#Check that the download failed as expected
+netdata_error_test(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -eq 0 ]; then
+ netdata_print_error "The page $2 did not answer with an error" $?
+ exit 1
+ fi
+}
+
+
+# Download information from Netdata
+netdata_download_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err"
+ netdata_test_download "$OUTDIR/$3.err" "$1/$2"
+}
+
+netdata_download_various_with_options() {
+ netdata_print_header "Getting options for $2"
+ curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err"
+ netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2"
+}
+
+# Download information from Netdata
+netdata_wrong_request_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err"
+ netdata_error_test "$ERRDIR/$3.err" "$1/$2"
+}
+
+# Download charts from Netdata
+netdata_download_charts() {
+ curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err"
+ netdata_test_download "$OUTDIR/charts.err" "$1/$2"
+
+ #TODO: rewrite the next line; it extracts the chart ids from the response
+ grep -w "id" tests/charts.out| cut -d: -f2 | grep "\"," | sed s/,//g | sort
+}
+
+#Test options for a specific chart
+netdata_download_chart() {
+ SEPARATOR="&"
+ EQUAL="="
+ OUTD=$OUTDIR
+ ENCODED=" "
+ for I in $(seq 0 1); do
+ if [ "$I" -eq "1" ] ; then
+ SEPARATOR="%26"
+ EQUAL="%3D"
+ OUTD=$OUTEDIR
+ ENCODED="encoded"
+ fi
+
+ NAME=${3//\"/}
+ netdata_print_header "Getting data for $NAME using $4 $ENCODED"
+
+ LDIR=$OUTD"/"$4
+
+ LURL="$1/$2$EQUAL$NAME"
+
+ NAME=$NAME"_$4"
+
+ curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err"
+ netdata_test_download "$LDIR/$NAME.err" "$LURL"
+
+ UFILES=( "points" "before" "after" )
+ COUNTER=0
+ for OPT in "points=100" "before=$PT" "after=$CT" ;
+ do
+ LURL="$LURL$SEPARATOR$OPT"
+ LFILE=$NAME"_${UFILES[$COUNTER]}";
+
+ curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err"
+ netdata_test_download "$LDIR/$LFILE.err" "$LURL"
+
+ COUNTER=$((COUNTER + 1))
+ done
+
+ LURL="$LURL&group$EQUAL"
+ for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average";
+ do
+ TURL=$LURL$OPT
+ TFILE=$NAME"_$OPT";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array";
+ do
+ TURL=$TURL"&format="$MORE
+ TFILE=$NAME"_$OPT""_$MORE";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ done
+ done
+
+ LURL="$LURL$OPT&gtime=60"
+ NFILE=$NAME"_gtime"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL&options=percentage"
+ NFILE=$NAME"_percentage"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL&dimensions=system%7Cnice"
+ NFILE=$NAME"_dimension"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL&label=testing"
+ NFILE=$NAME"_label"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+ done
+}
+
+# Download information from Netdata
+netdata_download_allmetrics() {
+ netdata_print_header "Getting all metrics"
+ LURL="$1/api/v1/allmetrics?format="
+ for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ;
+ do
+ TURL=$LURL$FMT
+ for OPT in "yes" "no";
+ do
+ if [ "$FMT" == "prometheus" ]; then
+ TURL="$TURL&help=$OPT&types=$OPT&timestamps=$OPT"
+ fi
+ TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND"
+
+ NAME="allmetrics_$FMT"
+ echo "$OUTDIR/$2/$NAME.out"
+ curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err"
+ netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL"
+ done
+ done
+}
+
+
+################################################################################################
+#### ####
+#### MAIN ROUTINE ####
+#### ####
+################################################################################################
+MURL="http://127.0.0.1:19999"
+
+netdata_create_directory $OUTDIR
+netdata_create_directory $OUTEDIR
+netdata_create_directory $OUTOPTDIR
+netdata_create_directory $ERRDIR
+
+wget --execute="robots = off" --mirror --convert-links --no-parent http://127.0.0.1:19999
+TEST=$?
+if [ $TEST -ne "0" ] ; then
+ echo "Cannot connect to Netdata"
+ exit 1
+fi
+
+netdata_download_various $MURL "netdata.conf" "netdata.conf"
+
+netdata_download_various_with_options $MURL "netdata.conf" "netdata.conf"
+
+netdata_wrong_request_various $MURL "api/v15/info?this%20could%20not%20be%20here" "err_version"
+
+netdata_wrong_request_various $MURL "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2"
+
+netdata_download_various $MURL "api/v1/info" "info"
+netdata_download_various_with_options $MURL "api/v1/info" "info"
+netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info"
+
+netdata_print_header "Getting all the netdata charts"
+CHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+
+if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
+ echo "The number of charts does not match when the division is not encoded.";
+ exit 2;
+elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then
+ echo "The number of charts does not match when everything is encoded";
+ exit 3;
+fi
+
+netdata_wrong_request_various $MURL "api/v1/chart" "err_chart_without_chart"
+netdata_wrong_request_various $MURL "api/v1/chart?_=234231424242" "err_chart_arg"
+
+netdata_download_various $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+netdata_download_various_with_options $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various_with_options $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2"
+netdata_download_various $MURL "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3"
+
+netdata_create_directory "$OUTDIR/chart"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various $MURL "api/v1/chart?chart=$NAME" "chart/$NAME"
+done
+
+netdata_wrong_request_various $MURL "api/v1/alarm_variables"
"err_alarm_variables_without_chart" +netdata_wrong_request_various $MURL "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg" +netdata_download_various $MURL "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args" + +netdata_create_directory "$OUTDIR/alarm_variables" +for I in $CHARTS ; do + NAME=${I//\"/} + netdata_download_various $MURL "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME" +done + +netdata_create_directory "$OUTDIR/badge" +netdata_create_directory "$OUTEDIR/badge" +for I in $CHARTS ; do + netdata_download_chart $MURL "api/v1/badge.svg?chart" "$I" "badge" +done + +netdata_create_directory "$OUTDIR/allmetrics" +netdata_download_allmetrics $MURL "allmetrics" + +netdata_download_various $MURL "api/v1/alarms?all" "alarms_all" +netdata_download_various $MURL "api/v1/alarms?active" "alarms_active" +netdata_download_various $MURL "api/v1/alarms" "alarms_nothing" + +netdata_download_various $MURL "api/v1/alarm_log?after" "alarm_without" +netdata_download_various $MURL "api/v1/alarm_log" "alarm_nothing" +netdata_download_various $MURL "api/v1/alarm_log?after&_=$PT" "alarm_log" + +netdata_create_directory "$OUTDIR/data" +netdata_create_directory "$OUTEDIR/data" +for I in $CHARTS ; do + netdata_download_chart $MURL "api/v1/data?chart" "$I" "data" + break; +done + +#http://arch-esxi:19999/api/v1/(*@&$!$%%5E)!$*%&)!$*%%5E*!%5E%!%5E$%!%5E%(!*%5E*%5E%(*@&$%5E%(!%5E#*&!^#$*&!^%)@($%^)!*&^(!*&^#$&#$)!$%^)!$*%&)#$!^#*$^!(*#^#)!%^!)$*%&!(*&$!^#$*&^!*#^$!*^)%(!*&$%)(!&#$!^*#&$^!*^%)!$%)!(&#$!^#*&^$ + +WHITE='\033[0;37m' +echo -e "${WHITE}ALL the URLS got 200 as answer!" + +exit 0 diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c index b24fdded..4f9826fb 100644 --- a/web/api/badges/web_buffer_svg.c +++ b/web/api/badges/web_buffer_svg.c @@ -11,7 +11,7 @@ * https://github.com/badges/shields/blob/master/measure-text.js */ -static double verdana11_widths[256] = { +static double verdana11_widths[128] = { [0] = 0.0, [1] = 0.0, [2] = 0.0, @@ -139,157 +139,36 @@ static double verdana11_widths[256] = { [124] = 4.9951171875, // | [125] = 6.982421875, // } [126] = 9.001953125, // ~ - [127] = 0.0, - [128] = 0.0, - [129] = 0.0, - [130] = 0.0, - [131] = 0.0, - [132] = 0.0, - [133] = 0.0, - [134] = 0.0, - [135] = 0.0, - [136] = 0.0, - [137] = 0.0, - [138] = 0.0, - [139] = 0.0, - [140] = 0.0, - [141] = 0.0, - [142] = 0.0, - [143] = 0.0, - [144] = 0.0, - [145] = 0.0, - [146] = 0.0, - [147] = 0.0, - [148] = 0.0, - [149] = 0.0, - [150] = 0.0, - [151] = 0.0, - [152] = 0.0, - [153] = 0.0, - [154] = 0.0, - [155] = 0.0, - [156] = 0.0, - [157] = 0.0, - [158] = 0.0, - [159] = 0.0, - [160] = 0.0, - [161] = 0.0, - [162] = 0.0, - [163] = 0.0, - [164] = 0.0, - [165] = 0.0, - [166] = 0.0, - [167] = 0.0, - [168] = 0.0, - [169] = 0.0, - [170] = 0.0, - [171] = 0.0, - [172] = 0.0, - [173] = 0.0, - [174] = 0.0, - [175] = 0.0, - [176] = 0.0, - [177] = 0.0, - [178] = 0.0, - [179] = 0.0, - [180] = 0.0, - [181] = 0.0, - [182] = 0.0, - [183] = 0.0, - [184] = 0.0, - [185] = 0.0, - [186] = 0.0, - [187] = 0.0, - [188] = 0.0, - [189] = 0.0, - [190] = 0.0, - [191] = 0.0, - [192] = 0.0, - [193] = 0.0, - [194] = 0.0, - [195] = 0.0, - [196] = 0.0, - [197] = 0.0, - [198] = 0.0, - [199] = 0.0, - [200] = 0.0, - [201] = 0.0, - [202] = 0.0, - [203] = 0.0, - [204] = 0.0, - [205] = 0.0, - [206] = 0.0, - [207] = 0.0, - [208] = 0.0, - [209] = 0.0, - [210] = 0.0, - [211] = 0.0, - [212] = 0.0, - [213] = 0.0, - [214] = 0.0, - [215] = 0.0, - [216] = 0.0, - [217] = 
0.0,
- [218] = 0.0,
- [219] = 0.0,
- [220] = 0.0,
- [221] = 0.0,
- [222] = 0.0,
- [223] = 0.0,
- [224] = 0.0,
- [225] = 0.0,
- [226] = 0.0,
- [227] = 0.0,
- [228] = 0.0,
- [229] = 0.0,
- [230] = 0.0,
- [231] = 0.0,
- [232] = 0.0,
- [233] = 0.0,
- [234] = 0.0,
- [235] = 0.0,
- [236] = 0.0,
- [237] = 0.0,
- [238] = 0.0,
- [239] = 0.0,
- [240] = 0.0,
- [241] = 0.0,
- [242] = 0.0,
- [243] = 0.0,
- [244] = 0.0,
- [245] = 0.0,
- [246] = 0.0,
- [247] = 0.0,
- [248] = 0.0,
- [249] = 0.0,
- [250] = 0.0,
- [251] = 0.0,
- [252] = 0.0,
- [253] = 0.0,
- [254] = 0.0,
- [255] = 0.0
+ [127] = 0.0
};
// find the width of the string using the verdana 11points font
-// re-write the string in place, skiping zero-length characters
-static inline double verdana11_width(char *s) {
+static inline double verdana11_width(const char *s, float em_size) {
double w = 0.0;
- char *d = s;
while(*s) {
- double t = verdana11_widths[(unsigned char)*s];
- if(t == 0.0)
+ // if a UTF8 multibyte char is found, guess its width equals 1em
+ // as the label width will be updated with JavaScript, this is not so important
+
+ // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata
+ // then use url_utf8_get_byte_length etc.
+ if(IS_UTF8_STARTBYTE(*s)) {
s++;
+ while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){
+ s++;
+ }
+ w += em_size;
+ }
else {
- w += t + VERDANA_KERNING;
- if(d != s)
- *d++ = *s++;
- else
- d = ++s;
+ if(likely(!(*s & 0x80))){ // a stray byte 1XXX XXXX is not valid in UTF8
+ double t = verdana11_widths[(unsigned char)*s];
+ if(t != 0.0)
+ w += t + VERDANA_KERNING;
+ }
+ s++;
}
}
- *d = '\0';
w -= VERDANA_KERNING;
w += VERDANA_PADDING;
return w;
@@ -810,8 +689,7 @@ static inline void calc_colorz(const char *color, char *final, size_t len, calcu
#define COLOR_STRING_SIZE 100
void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) {
- char label_buffer[LABEL_STRING_SIZE + 1]
- , value_color_buffer[COLOR_STRING_SIZE + 1]
+ char value_color_buffer[COLOR_STRING_SIZE + 1]
, value_string[VALUE_STRING_SIZE + 1]
, label_escaped[LABEL_STRING_SIZE + 1]
, value_escaped[VALUE_STRING_SIZE + 1]
@@ -831,14 +709,11 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch
calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision);
- // we need to copy the label, since verdana11_width may write to it
- strncpyz(label_buffer, label, LABEL_STRING_SIZE);
-
- label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2);
- value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2);
+ label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
+ value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
total_width = label_width + value_width;
- escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE);
+ escape_xmlz(label_escaped, label, LABEL_STRING_SIZE);
escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE);
escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE);
escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE);
@@ -862,19 +737,43 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch
"<stop offset=\"1\" stop-opacity=\".1\"/>"
"</linearGradient>" "<mask id=\"round\">" - "<rect width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>" + "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>" "</mask>" "<g mask=\"url(#round)\">" - "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" - "<rect x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" - "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>" + "<rect class=\"bdge-rect-lbl\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" + "<rect class=\"bdge-rect-val\" x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" + "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>" "</g>" "<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">" - "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" + "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" + "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\">%s</text>" + "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" + "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\">%s</text>" "</g>" + "<script type=\"text/javascript\">" + "var bdg_horiz_padding = %d;" + "function netdata_bdge_each(list, attr, value){" + "Array.prototype.forEach.call(list, function(el){" + "el.setAttribute(attr, value);" + "});" + "};" + "var this_svg = document.currentScript.closest(\"svg\");" + "var elem_lbl = this_svg.getElementsByClassName(\"bdge-lbl-lbl\");" + "var elem_val = this_svg.getElementsByClassName(\"bdge-lbl-val\");" + "var lbl_size = elem_lbl[0].getBBox();" + "var val_size = elem_val[0].getBBox();" + "var width_total = lbl_size.width + bdg_horiz_padding*2;" + "this_svg.getElementsByClassName(\"bdge-rect-lbl\")[0].setAttribute(\"width\", width_total);" + "netdata_bdge_each(elem_lbl, \"x\", (lbl_size.width / 2) + bdg_horiz_padding);" + "netdata_bdge_each(elem_val, \"x\", width_total + (val_size.width / 2) + bdg_horiz_padding);" + "var val_rect = this_svg.getElementsByClassName(\"bdge-rect-val\")[0];" + "val_rect.setAttribute(\"width\", val_size.width + bdg_horiz_padding*2);" + "val_rect.setAttribute(\"x\", width_total);" + "width_total += val_size.width + bdg_horiz_padding*2;" + "var width_update_elems = this_svg.getElementsByClassName(\"bdge-ttl-width\");" + "netdata_bdge_each(width_update_elems, \"width\", width_total);" + "this_svg.setAttribute(\"width\", width_total);" + "</script>" "</svg>", total_width, height, total_width, height, round_corner, @@ -885,7 +784,8 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch label_width / 2, ceil(height - text_offset), label_escaped, label_width / 2, ceil(height - text_offset - 1.0), label_escaped, label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped); + label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped, + BADGE_HORIZONTAL_PADDING ); } int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { diff --git a/web/api/health/README.md b/web/api/health/README.md index 66a80d5f..0b4f79f3 100644 --- a/web/api/health/README.md +++ 
b/web/api/health/README.md @@ -50,35 +50,39 @@ From Netdata v1.16.0 and beyond, the configuration controlled via the API comman Specifically, the API allows you to: - Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log. - Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the netdata UI will show the alarms as active, but no notifications will be sent. - - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. + - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://localhost:19999/netdata.conf`: -```bash +``` [registry] # netdata management api key file = /var/lib/netdata/netdata.api.key ``` -You can access the API via GET requests, by adding the bearer token to an `Authorization` http header, like this: +You can access the API via GET requests, by adding the bearer token to an `Authorization` http header, like this: ``` -curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" ``` -The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled. +By default access to the health management API is only allowed from `localhost`. Accessing the API from anywhere else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in netdata.conf within the [web] section. See [web server access lists](../../server/#access-lists) for more information. + +The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled. If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`. ### Disable or silence all alarms If all you need is to temporarily disable all health checks, then issue the following before your maintenance period starts: + ``` -curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken" ``` + The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log. If you want the health checks to keep running but not receive any notifications during your maintenance period, you can instead use this: ``` -curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken" ``` Alarms may then still be raised and logged in netdata, so you'll be able to see them via the UI. @@ -86,44 +90,44 @@ Alarms may then still be raised and logged in netdata, so you'll be able to see Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the RESET command.
``` - curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" + curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" ``` ### Disable or silence specific alarms -If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used. +If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used. Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced. -- `DISABLE` : Set the mode to disable health checks. -- `SILENCE` : Set the mode to silence notifications. +- `DISABLE` : Set the mode to disable health checks. +- `SILENCE` : Set the mode to silence notifications. -You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well. -You will get a warning in the response, if a selector was added without a SILENCE/DISABLE command, or vice versa. +You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well. +You will get a warning in the response, if a selector was added without a SILENCE/DISABLE command, or vice versa. -Each request can specify a single alarm `selector`, with one or more `selection criteria`. -A single alarm will match a `selector` if all selection criteria match the alarm. +Each request can specify a single alarm `selector`, with one or more `selection criteria`. +A single alarm will match a `selector` if all selection criteria match the alarm. You can add as many selectors as you like. In essence, the rule is: IF (alarm matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command. To clear all selectors and reset the mode to default, use the `RESET` command. -The following example silences notifications for all the alarms with context=load: +The following example silences notifications for all the alarms with context=load: ``` -curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken" +curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken" ``` -#### Selection criteria +#### Selection criteria -The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two). +The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two). The accepted keys for the `selection criteria` are the following: -- `alarm` : The expression provided will match both `alarm` and `template` names. +- `alarm` : The expression provided will match both `alarm` and `template` names. - `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`. - `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`. - `hosts` : The hostnames that will need to match. - `families` : The alarm families. 
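+For instance, a single request can combine several of these keys into one selector; an illustrative call (the hostname, token and selector values below are placeholders) that disables health checks only for `load` alarms on host `myhost` would be:
+
+```
+curl "http://myserver/api/v1/manage/health?cmd=DISABLE&context=load&hosts=myhost" -H "X-Auth-Token: Mytoken"
+```
+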
-You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. e.g. there is no reason to add `hosts: *`, if you want the criteria to be applied to alarms for all hosts. +You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. e.g. there is no reason to add `hosts: *`, if you want the criteria to be applied to alarms for all hosts. Example 1: Disable all health checks for context = `random` @@ -152,6 +156,7 @@ The command `LIST` was added in netdata v1.16.0 and returns a JSON with the curr ``` As an example, the following response shows that we have two silencers configured, one for an alarm called `samplealarm` and one for alarms with context `random` on host `myhost` + ``` json { @@ -178,7 +183,7 @@ json "type": "DISABLE", "silencers": [] } - +``` ### Responses diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c index 468054c6..94293dbe 100644 --- a/web/api/health/health_cmdapi.c +++ b/web/api/health/health_cmdapi.c @@ -179,6 +179,7 @@ int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, c silencer = health_silencers_addparam(silencer, key, value); } } + if (likely(silencer)) { health_silencers_add(silencer); buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED); diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c index 7c0d728b..2273224b 100644 --- a/web/api/web_api_v1.c +++ b/web/api/web_api_v1.c @@ -797,23 +797,23 @@ inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char * } // get the command - char *tok = mystrsep(&url, "?"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, tok); - uint32_t hash = simple_hash(tok); + if(url) { + debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, url); + uint32_t hash = simple_hash(url); for(i = 0; api_commands[i].command ;i++) { - if(unlikely(hash == api_commands[i].hash && !strcmp(tok, api_commands[i].command))) { + if(unlikely(hash == api_commands[i].hash && !strcmp(url, api_commands[i].command))) { if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl)) return web_client_permission_denied(w); - return api_commands[i].callback(host, w, url); + //return api_commands[i].callback(host, w, url); + return api_commands[i].callback(host, w, (w->decoded_query_string + 1)); } } buffer_flush(w->response.data); buffer_strcat(w->response.data, "Unsupported v1 API command: "); - buffer_strcat_htmlescape(w->response.data, tok); + buffer_strcat_htmlescape(w->response.data, url); return 404; } else { diff --git a/web/gui/console.html b/web/gui/console.html index 942c8c3c..9b172644 100644 --- a/web/gui/console.html +++ b/web/gui/console.html @@ -10,7 +10,7 @@ <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" /> - <link rel="icon" href="favicon.ico" /> + <link rel="icon" 
href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" /> <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" /> <!-- Google Tag Manager --> <script>(function (w, d, s, l, i) { diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js index e7d99a15..a32a29be 100644 --- a/web/gui/dashboard.js +++ b/web/gui/dashboard.js @@ -8118,10 +8118,10 @@ let chartState = function (element) { }; this.chartDataUniqueID = function () { - return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(); + return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(true); }; - this.chartURLOptions = function () { + this.chartURLOptions = function (isForUniqueId) { let ret = ''; if (this.override_options !== null) { @@ -8136,7 +8136,9 @@ let chartState = function (element) { ret += '%7C' + 'jsonwrap'; - if (NETDATA.options.current.eliminate_zero_dimensions) { + // always add `nonzero` when it's used to create a chartDataUniqueID + // we cannot just remove `nonzero` because of backwards compatibility with old snapshots + if (isForUniqueId || NETDATA.options.current.eliminate_zero_dimensions) { ret += '%7C' + 'nonzero'; } diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js index 0013311e..df924264 100644 --- a/web/gui/dashboard_info.js +++ b/web/gui/dashboard_info.js @@ -154,7 +154,7 @@ netdataDashboard.menu = { 'zfs': { title: 'ZFS filesystem', icon: '<i class="fas fa-folder-open"></i>', - info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arcstat/arcstat.py" target="_blank">arcstat.py</a> and <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arc_summary/arc_summary.py" target="_blank">arc_summary.py</a>.' + info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arcstat/arcstat" target="_blank">arcstat.py</a> and <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arc_summary/arc_summary3" target="_blank">arc_summary.py</a>.' 
}, 'btrfs': { diff --git a/web/gui/demosites.html b/web/gui/demosites.html index e00fbbfd..f5d26380 100644 --- a/web/gui/demosites.html +++ b/web/gui/demosites.html @@ -10,7 +10,7 @@ <meta name=viewport content="width=device-width,initial-scale=1"> <link rel=apple-touch-icon href=apple-touch-icon.png> - <link rel="icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAAA7EAAAOxAGVKw4bAAACyklEQVRYhcWXz0sUYRjHP8+wLItImEhnEfEgEVJk+56CbCECQWezOkTQTTp6kejgKUSoEAoK/wAhaaeDBkLoIahZIWIJo0U8hHQJJBYJERGfDrOju7PjuO5u6/c0PM/7zvfzvvP+eEaoVu6wBZIA3cW82wtvY7cD1xEWSTpb1bxWjm2RtbtQxoF7iLSA7gO/UL4A74G3mBIzNz0Peg1hFmUK46zXBuDaCYQJlDFE4ke/QreAZyhPMc42rt2PyEoxt4cyDUxgnO3qAVy7C8gg0hdFHwBZA0ZIOt9w7e+I9B6mdBVhmGTlbFQCuPYFhA8g56o3P3D6CwyiDCIyFshtgqRIZnKlUStg3lW7OYC0AvPATkiuA9U3uOlEOEDWTgCZ2s3LIMbCU9ID3A8HUCZO9s2jGCRxdFIflDUF/EX3I3q1N0iq+8BZf+v6MzDeFHMAEQvoJpuOAwiu3Qr89g6ZJkm5CnoHeBwDbjTV3CPoRRhF+WQBA801B+AJiAUMWMDFptuLtBef+iygs+kAh+q0gDOnCNBmETyOmywLKJyifyEG/ATqPP+LUt0FZoAlIA6MgNyKqDo2YkAO6G+QeQrjfCyJzuHaoyCvjuiVs4q0jdBMwNyTcV6j+jm0h7BkAYuohpZLJ1TUQJYrIqo7KAtW8VaabQBA1GVWmROZwzgFfwtOoRpealevdGg0a8eAoUB0D9VJ8M8A46yDTNdlL9wmaz8MMX9RrIQOpfoS4+S9br7cdAvoCiLn6wLxFtwyQhxlKMQ8j3CJpFeml+9QN90N6iLSURfEkXD8AQwms+aHyo9hk1kHSaG6+Z/MU6XmlQAeRA7EoLraQPM83si/BlPhF5E3E1dQfY5S++5Q/9dMLwdH7uv4n1PX7gEeAXejy+0y4x2QOWASk8lHNT0e4AAk3QZ6E6+E68MrZNqK2QKwgXevLCEskHSqumX/AUXU5QBtOC5FAAAAAElFTkSuQmCC"> + <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" /> <meta property="og:url" content="https://my-netdata.io" /> <meta property="og:type" content="website" /> @@ -1100,40 +1100,7 @@ p { </div> </div> </div> - <div class="mygauge-combo"> - <div class="mygauge"> - <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;"> - <b>EU - France</b> - </div> - <div class="mysparkline"> - <div class="mysparkline-overchart-label2"> - requests/s - </div> - <div class="mysparkline-overchart-value" id="ventureer.requests.netdata" > - </div> - <div data-netdata="netdata.requests" - data-dimensions="requests" - data-host="//ventureer.my-netdata.io" - data-common-max="top-gauges" - data-decimal-digits="0" - data-chart-library="dygraph" - data-dygraph-theme="sparkline" - data-dygraph-type="area" - data-width="100%" - data-height="100%" - data-after="-300" - data-colors="#FF4B91" - data-show-value-of-requests-at="ventureer.requests.netdata" - ></div> - </div> - </div> - <div class="mygauge-button"> - <a class="btn btn-alt mygauge-legend-button" href=//ventureer.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoVentureer><strong>Enter Roubaix!</strong></a> - <div class="mygause-donation"> - Donated by ventureer.com - </div> - </div> - </div> + <div class="mygauge-combo"> <div class="mygauge"> <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;"> diff --git a/web/gui/images/packaging-beta-tag.svg b/web/gui/images/packaging-beta-tag.svg new file mode 100644 index 00000000..cebdc084 --- /dev/null +++ b/web/gui/images/packaging-beta-tag.svg @@ -0,0 +1,42 @@ +<svg width="127" height="16" 
viewBox="0 0 127 16" fill="none" xmlns="http://www.w3.org/2000/svg"> +<rect width="127" height="16" rx="2" fill="url(#paint0_linear)"/> +<path d="M86 0H125C126.105 0 127 0.895431 127 2V14C127 15.1046 126.105 16 125 16H86V0Z" fill="url(#paint1_linear)"/> +<path d="M6.5 4V10" stroke="white" stroke-linecap="round" stroke-linejoin="round"/> +<path d="M12.5 7C13.3284 7 14 6.32843 14 5.5C14 4.67157 13.3284 4 12.5 4C11.6716 4 11 4.67157 11 5.5C11 6.32843 11.6716 7 12.5 7Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/> +<path d="M6.5 13C7.32843 13 8 12.3284 8 11.5C8 10.6716 7.32843 10 6.5 10C5.67157 10 5 10.6716 5 11.5C5 12.3284 5.67157 13 6.5 13Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/> +<path d="M12.5 7C12.5 8.19347 12.0259 9.33807 11.182 10.182C10.3381 11.0259 9.19347 11.5 8 11.5" stroke="white" stroke-linecap="round" stroke-linejoin="round"/> +<g filter="url(#filter0_d)"> +<path d="M23.939 7.93848V11H22.9077V3.17969H25.792C26.6478 3.17969 27.3174 3.39811 27.8008 3.83496C28.2878 4.27181 28.5312 4.8501 28.5312 5.56982C28.5312 6.32894 28.2931 6.91439 27.8169 7.32617C27.3442 7.73438 26.6657 7.93848 25.7812 7.93848H23.939ZM23.939 7.09521H25.792C26.3434 7.09521 26.766 6.96631 27.0596 6.7085C27.3532 6.4471 27.5 6.07113 27.5 5.58057C27.5 5.11507 27.3532 4.74268 27.0596 4.46338C26.766 4.18408 26.3631 4.03906 25.8511 4.02832H23.939V7.09521ZM33.6548 11C33.5976 10.8854 33.551 10.6813 33.5152 10.3877C33.0533 10.8675 32.5018 11.1074 31.8609 11.1074C31.288 11.1074 30.8171 10.9463 30.4483 10.624C30.0831 10.2982 29.9004 9.88639 29.9004 9.38867C29.9004 8.78353 30.1296 8.31445 30.5879 7.98145C31.0499 7.64486 31.698 7.47656 32.5323 7.47656H33.4991V7.02002C33.4991 6.67269 33.3952 6.39697 33.1876 6.19287C32.9799 5.98519 32.6737 5.88135 32.2691 5.88135C31.9146 5.88135 31.6174 5.97087 31.3775 6.1499C31.1376 6.32894 31.0176 6.54557 31.0176 6.7998H30.0186C30.0186 6.50977 30.1207 6.23047 30.3248 5.96191C30.5324 5.68978 30.8117 5.47493 31.1627 5.31738C31.5171 5.15983 31.9057 5.08105 32.3282 5.08105C32.9978 5.08105 33.5224 5.24935 33.9019 5.58594C34.2815 5.91895 34.4784 6.37907 34.4927 6.96631V9.64111C34.4927 10.1746 34.5608 10.599 34.6968 10.9141V11H33.6548ZM32.0059 10.2427C32.3174 10.2427 32.6129 10.1621 32.8921 10.001C33.1714 9.83984 33.3738 9.63037 33.4991 9.37256V8.18018H32.7203C31.5028 8.18018 30.8941 8.53646 30.8941 9.24902C30.8941 9.56055 30.9979 9.80404 31.2056 9.97949C31.4133 10.1549 31.6801 10.2427 32.0059 10.2427ZM38.8214 10.2964C39.1759 10.2964 39.4856 10.189 39.7506 9.97412C40.0156 9.75928 40.1624 9.49072 40.191 9.16846H41.131C41.1131 9.50146 40.9985 9.81836 40.7872 10.1191C40.576 10.4199 40.2931 10.6598 39.9386 10.8389C39.5877 11.0179 39.2153 11.1074 38.8214 11.1074C38.0301 11.1074 37.3999 10.8442 36.9308 10.3179C36.4653 9.78792 36.2325 9.06462 36.2325 8.14795V7.98145C36.2325 7.41569 36.3364 6.9126 36.5441 6.47217C36.7517 6.03174 37.0489 5.68978 37.4357 5.44629C37.826 5.2028 38.2861 5.08105 38.816 5.08105C39.4677 5.08105 40.0084 5.2762 40.4381 5.6665C40.8714 6.0568 41.1023 6.56348 41.131 7.18652H40.191C40.1624 6.81055 40.0192 6.5026 39.7613 6.2627C39.5071 6.01921 39.192 5.89746 38.816 5.89746C38.3112 5.89746 37.9191 6.08008 37.6398 6.44531C37.364 6.80697 37.2262 7.33154 37.2262 8.01904V8.20703C37.2262 8.87663 37.364 9.39225 37.6398 9.75391C37.9155 10.1156 38.3094 10.2964 38.8214 10.2964ZM44.3102 8.30908L43.6872 8.95898V11H42.6935V2.75H43.6872V7.73975L44.2189 7.10059L46.029 5.18848H47.2375L44.9762 7.61621L47.5007 11H46.3351L44.3102 8.30908ZM52.2913 11C52.234 10.8854 
52.1874 10.6813 52.1516 10.3877C51.6897 10.8675 51.1383 11.1074 50.4973 11.1074C49.9244 11.1074 49.4535 10.9463 49.0847 10.624C48.7195 10.2982 48.5369 9.88639 48.5369 9.38867C48.5369 8.78353 48.766 8.31445 49.2244 7.98145C49.6863 7.64486 50.3344 7.47656 51.1687 7.47656H52.1355V7.02002C52.1355 6.67269 52.0316 6.39697 51.824 6.19287C51.6163 5.98519 51.3101 5.88135 50.9055 5.88135C50.551 5.88135 50.2538 5.97087 50.0139 6.1499C49.774 6.32894 49.654 6.54557 49.654 6.7998H48.655C48.655 6.50977 48.7571 6.23047 48.9612 5.96191C49.1689 5.68978 49.4482 5.47493 49.7991 5.31738C50.1536 5.15983 50.5421 5.08105 50.9646 5.08105C51.6342 5.08105 52.1588 5.24935 52.5383 5.58594C52.9179 5.91895 53.1148 6.37907 53.1291 6.96631V9.64111C53.1291 10.1746 53.1972 10.599 53.3332 10.9141V11H52.2913ZM50.6423 10.2427C50.9538 10.2427 51.2493 10.1621 51.5286 10.001C51.8079 9.83984 52.0102 9.63037 52.1355 9.37256V8.18018H51.3567C50.1392 8.18018 49.5305 8.53646 49.5305 9.24902C49.5305 9.56055 49.6343 9.80404 49.842 9.97949C50.0497 10.1549 50.3165 10.2427 50.6423 10.2427ZM54.8904 8.0459C54.8904 7.13997 55.0999 6.42025 55.5188 5.88672C55.9378 5.34961 56.4928 5.08105 57.1839 5.08105C57.8929 5.08105 58.4461 5.33171 58.8436 5.83301L58.8919 5.18848H59.7996V10.8604C59.7996 11.6123 59.5758 12.2049 59.1282 12.6382C58.6842 13.0715 58.0862 13.2881 57.3343 13.2881C56.9153 13.2881 56.5053 13.1986 56.1043 13.0195C55.7033 12.8405 55.3971 12.5952 55.1858 12.2837L55.7015 11.6875C56.1276 12.2139 56.6486 12.4771 57.2645 12.4771C57.7479 12.4771 58.1238 12.341 58.3924 12.0688C58.6645 11.7967 58.8006 11.4136 58.8006 10.9194V10.4199C58.4031 10.8783 57.8606 11.1074 57.1731 11.1074C56.4928 11.1074 55.9414 10.8335 55.5188 10.2856C55.0999 9.73779 54.8904 8.99121 54.8904 8.0459ZM55.8895 8.15869C55.8895 8.81396 56.0237 9.32959 56.2923 9.70557C56.5608 10.078 56.9368 10.2642 57.4202 10.2642C58.0468 10.2642 58.507 9.97949 58.8006 9.41016V6.75684C58.4962 6.20182 58.0397 5.92432 57.431 5.92432C56.9476 5.92432 56.5698 6.1123 56.2977 6.48828C56.0255 6.86426 55.8895 7.42106 55.8895 8.15869ZM62.8231 11H61.8295V5.18848H62.8231V11ZM61.7489 3.64697C61.7489 3.48584 61.7972 3.34977 61.8939 3.23877C61.9942 3.12777 62.141 3.07227 62.3343 3.07227C62.5277 3.07227 62.6745 3.12777 62.7748 3.23877C62.875 3.34977 62.9252 3.48584 62.9252 3.64697C62.9252 3.80811 62.875 3.94238 62.7748 4.0498C62.6745 4.15723 62.5277 4.21094 62.3343 4.21094C62.141 4.21094 61.9942 4.15723 61.8939 4.0498C61.7972 3.94238 61.7489 3.80811 61.7489 3.64697ZM65.7983 5.18848L65.8305 5.91895C66.2745 5.36035 66.8546 5.08105 67.5707 5.08105C68.7989 5.08105 69.4184 5.77393 69.4291 7.15967V11H68.4355V7.1543C68.4319 6.73535 68.3352 6.42562 68.1454 6.2251C67.9592 6.02458 67.6674 5.92432 67.2699 5.92432C66.9477 5.92432 66.6648 6.01025 66.4213 6.18213C66.1778 6.354 65.988 6.57959 65.852 6.85889V11H64.8583V5.18848H65.7983ZM71.1313 8.0459C71.1313 7.13997 71.3408 6.42025 71.7597 5.88672C72.1787 5.34961 72.7337 5.08105 73.4248 5.08105C74.1338 5.08105 74.687 5.33171 75.0845 5.83301L75.1328 5.18848H76.0405V10.8604C76.0405 11.6123 75.8167 12.2049 75.3691 12.6382C74.9251 13.0715 74.3271 13.2881 73.5752 13.2881C73.1562 13.2881 72.7462 13.1986 72.3452 13.0195C71.9442 12.8405 71.638 12.5952 71.4267 12.2837L71.9424 11.6875C72.3685 12.2139 72.8895 12.4771 73.5054 12.4771C73.9888 12.4771 74.3647 12.341 74.6333 12.0688C74.9054 11.7967 75.0415 11.4136 75.0415 10.9194V10.4199C74.644 10.8783 74.1015 11.1074 73.414 11.1074C72.7337 11.1074 72.1823 10.8335 71.7597 10.2856C71.3408 9.73779 71.1313 8.99121 71.1313 8.0459ZM72.1304 
8.15869C72.1304 8.81396 72.2646 9.32959 72.5332 9.70557C72.8017 10.078 73.1777 10.2642 73.6611 10.2642C74.2877 10.2642 74.7479 9.97949 75.0415 9.41016V6.75684C74.7371 6.20182 74.2806 5.92432 73.6719 5.92432C73.1885 5.92432 72.8107 6.1123 72.5386 6.48828C72.2664 6.86426 72.1304 7.42106 72.1304 8.15869Z" fill="white"/> +</g> +<g filter="url(#filter1_d)"> +<path d="M93.7949 12V4.17969H96.4751C97.3595 4.17969 98.0327 4.35693 98.4946 4.71143C98.9565 5.06592 99.1875 5.59408 99.1875 6.2959C99.1875 6.65397 99.0908 6.97624 98.8975 7.2627C98.7041 7.54915 98.4212 7.77116 98.0488 7.92871C98.4714 8.04329 98.7972 8.25993 99.0264 8.57861C99.2591 8.89372 99.3755 9.27327 99.3755 9.71729C99.3755 10.4513 99.1392 11.0153 98.6665 11.4092C98.1974 11.8031 97.5243 12 96.647 12H93.7949ZM95.1538 8.47119V10.915H96.6631C97.0892 10.915 97.4222 10.8094 97.6621 10.5981C97.902 10.3869 98.022 10.0933 98.022 9.71729C98.022 8.90446 97.6066 8.4891 96.7759 8.47119H95.1538ZM95.1538 7.47217H96.4858C96.9084 7.47217 97.2378 7.37728 97.4741 7.1875C97.714 6.99414 97.834 6.72201 97.834 6.37109C97.834 5.98438 97.723 5.70508 97.501 5.5332C97.2826 5.36133 96.9406 5.27539 96.4751 5.27539H95.1538V7.47217ZM105.745 8.50879H102.533V10.915H106.288V12H101.174V4.17969H106.25V5.27539H102.533V7.43457H105.745V8.50879ZM113.592 5.27539H111.153V12H109.805V5.27539H107.388V4.17969H113.592V5.27539ZM118.796 10.1792H115.767L115.133 12H113.72L116.674 4.17969H117.894L120.853 12H119.435L118.796 10.1792ZM116.148 9.0835H118.415L117.281 5.83936L116.148 9.0835Z" fill="white"/> +</g> +<defs> +<filter id="filter0_d" x="21.9077" y="2.75" width="55.1328" height="12.5381" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB"> +<feFlood flood-opacity="0" result="BackgroundImageFix"/> +<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/> +<feOffset dy="1"/> +<feGaussianBlur stdDeviation="0.5"/> +<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/> +<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/> +<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/> +</filter> +<filter id="filter1_d" x="92.7949" y="4.17969" width="29.0583" height="9.82031" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB"> +<feFlood flood-opacity="0" result="BackgroundImageFix"/> +<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/> +<feOffset dy="1"/> +<feGaussianBlur stdDeviation="0.5"/> +<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/> +<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/> +<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/> +</filter> +<linearGradient id="paint0_linear" x1="63.5" y1="0" x2="63.5" y2="16" gradientUnits="userSpaceOnUse"> +<stop stop-color="#606060"/> +<stop offset="1" stop-color="#4D4D4D"/> +</linearGradient> +<linearGradient id="paint1_linear" x1="106.5" y1="0" x2="106.5" y2="16" gradientUnits="userSpaceOnUse"> +<stop stop-color="#BFBFBF"/> +<stop offset="1" stop-color="#7F7F7F"/> +</linearGradient> +</defs> +</svg> diff --git a/web/gui/index.html b/web/gui/index.html index 4a8647dd..58b07440 100644 --- a/web/gui/index.html +++ b/web/gui/index.html @@ -16,23 +16,6 @@ <link rel="stylesheet" type="text/css" href="main.css?v=5"> <link rel="icon" 
href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" /> - <!-- <link rel="apple-touch-icon" sizes="57x57" href="images/apple-icon-57x57.png"> - <link rel="apple-touch-icon" sizes="60x60" href="images/apple-icon-60x60.png"> - <link rel="apple-touch-icon" sizes="72x72" href="images/apple-icon-72x72.png"> - <link rel="apple-touch-icon" sizes="76x76" href="images/apple-icon-76x76.png"> - <link rel="apple-touch-icon" sizes="114x114" href="images/apple-icon-114x114.png"> - <link rel="apple-touch-icon" sizes="120x120" href="images/apple-icon-120x120.png"> - <link rel="apple-touch-icon" sizes="144x144" href="images/apple-icon-144x144.png"> - <link rel="apple-touch-icon" sizes="152x152" href="images/apple-icon-152x152.png"> - <link rel="apple-touch-icon" sizes="180x180" href="images/apple-icon-180x180.png"> - <link rel="icon" type="image/png" sizes="192x192" href="images/android-icon-192x192.png"> - <link rel="icon" type="image/png" sizes="32x32" href="images/favicon-32x32.png"> - <link rel="icon" type="image/png" sizes="96x96" href="images/favicon-96x96.png"> - <link rel="icon" type="image/png" sizes="16x16" href="images/favicon-16x16.png"> - <link rel="manifest" href="manifest.json"> - <meta name="msapplication-TileColor" content="#ffffff"> - <meta name="msapplication-TileImage" content="images/ms-icon-144x144.png"> - <meta name="theme-color" content="#ffffff"> --> <meta property="og:locale" content="en_US" /> <meta property="og:url" content="https://my-netdata.io" /> @@ -80,23 +63,6 @@ </script> <nav class="navbar navbar-default navbar-fixed-top" role="banner"> <div class="container"> - <!-- <nav id="mynetdata_nav" class="collapse navbar-collapse navbar-left" role="navigation" style="padding-right: 20px;"> - <ul class="nav navbar-nav"> - <li data-placement="right" style="line-height: 50px; margin-right: 15px" title="Netdata Agent"> - <img src="images/netdata-logomark.svg" height="32"/> - </li> - <li class="dropdown" id="myNetdataDropdownParent" title="your other netdata servers" data-toggle="tooltip" data-placement="right"> - <a href="#" id="hostname" class="dropdown-toggle" data-toggle="dropdown">my-netdata <strong class="caret"></strong></a> - <div id="my-netdata-dropdown-content" class="dropdown-menu scrollable-menu inpagemenu"> - <div class="agent-item" style="white-space: nowrap"> - <i class="fas fa-hourglass-half"></i> - Loading, please wait... 
- <div></div> - </div> - </div> - </li> - </ul> - </nav> --> <div class="navbar-header"> <button class="navbar-toggle" type="button" data-toggle="collapse" data-target=".navbar-collapse"> <span class="sr-only">Toggle navigation</span> @@ -104,13 +70,14 @@ <span class="icon-bar"></span> <span class="icon-bar"></span> </button> - <!-- <a href="/" class="navbar-brand" id="1hostname" title="server hostname<br/>click it to reload the dashboard" data-toggle="tooltip" data-placement="bottom">netdata</a> --> <ul class="nav navbar-nav" style="display: inline-block"> <li data-placement="right" class="hidden-xs hidden-sm" style="line-height: 50px; margin-right: 15px" title="Netdata Agent"> <img src="images/netdata-logomark.svg" height="32"/> </li> <li class="dropdown" id="myNetdataDropdownParent" title="your other netdata servers" data-toggle="tooltip" data-placement="right"> - <a href="#" id="hostname" class="dropdown-toggle" data-toggle="dropdown">my-netdata <strong class="caret"></strong></a> + <a href="#" id="hostname" class="dropdown-toggle" data-toggle="dropdown"> + my-netdata + <strong class="caret"></strong></a> <div id="my-netdata-dropdown-content" class="dropdown-menu scrollable-menu inpagemenu"> <div class="agent-item" style="white-space: nowrap"> <i class="fas fa-hourglass-half"></i> @@ -135,26 +102,6 @@ <li title="print this dashboard to PDF" data-toggle="tooltip" data-placement="bottom" id="printButton"><a href="#" class="btn" data-toggle="modal" data-target="#printPreflightModal"><i class="fas fa-print"></i> <span class="hidden-sm hidden-md hidden-lg">Print</span></a></li> <li title="get help on using the charts" data-toggle="tooltip" data-placement="bottom" class="hidden-sm"><a href="#" class="btn" data-toggle="modal" data-target="#helpModal"><i class="fas fa-question-circle"></i> <span class="hidden-sm hidden-md">Help</span></a></li> <li id="account-menu-container" class="dropdown" data-toggle="tooltip"></li> - <!-- - <li class="dropdown hidden-sm hidden-md hidden-lg" id="myNetdataDropdownParent" title="your other netdata servers" data-toggle="tooltip" data-placement="right"> - <a href="#" class="dropdown-toggle" data-toggle="dropdown">my-netdata <strong class="caret"></strong></a> - <div id="my-netdata-dropdown-content" class="dropdown-menu scrollable-menu inpagemenu"> - <div class="agent-item" style="white-space: nowrap"> - <i class="fas fa-hourglass-half"></i> - Loading, please wait... 
- <div></div> - </div> - </div> - </li> - --> - <!-- - <li class="dropdown hidden-sm hidden-md hidden-lg"> - <a href="#" class="dropdown-toggle" data-toggle="dropdown">My Nodes <strong class="caret"></strong></a> - <ul id="mynetdata_servers2" class="dropdown-menu scrollable-menu inpagemenu" role="menu"> - <li><a href="#" onclick="return false;" style="color: #999;">loading...</a></li> - </ul> - </li> - --> </ul> </nav> </div> diff --git a/web/gui/infographic.html b/web/gui/infographic.html index b3112781..24ff8f4e 100644 --- a/web/gui/infographic.html +++ b/web/gui/infographic.html @@ -9,13 +9,13 @@ <meta name=viewport content="width=device-width,initial-scale=1"> <link rel=apple-touch-icon href=apple-touch-icon.png> - <link rel="icon" type="image/png" sizes="32x32" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAACNklEQVRYhcXXv2tUQRAH8M+FEIJISBHCIWIhIQSUILERi4AiiqCggiIiomAjlhaC4j+ghYWISgqNohZaCBZBC8Ei8QdEUCutFBsxCBqDYkgci/cunkfuJffjJQPD8mZm5/vd2WV2HzlJ0Bs8CvrywsgCHwy+BpGOg0sJfjj4nYKX9FdwKG9gwZlgtgK8pLOpPxfw1mCoCnClDgWtzQTvCEYWCV7SkWAlFBoEb8dlDKBF8t2bMWUSH/AHr3CiEfz5CPUusPJLkRCdk5ZqyeqUrQv4R7E5TwK7M3zTeIKduRAIitiWEfIY69GdCwGcRFuG/xqONRkzkaA7+J5x+MaDtWmHvJ4HgeEM8Nn0bridfv9HoOFyBAdwJCPkqqTzHWwUaz7wgeBHxupfBKuCj2W25mxBsCGYyAB/FxTT27HcPlyep64tCLbjKbqqhLzBlgKfF8pVE4FgRXABI+ioEnYfOyzcFWsCbg+OV+xlpU4ER4O+4HVwL51b3xYEXcGu4Ao+YQhr5gmdxHmsQyfG0b/YxbWmLfRWmnxa0s06VbTMCpnBS9zFzQKTwR5cXCzwHIE02Sl8wSZsRI/kgLVJqjSd+t9LVjiG1diPszhdK3A5gR48k5zYMTwscC59sfT799CYKvA8EttbSeXgTr3gJQKl91kR+yTlvyG5uUbLYh9gb+ovltkb6qYtNSRo3kOygsBSzGlKsubf43USWLYK5CLLXoFWyU/CtzLbVDpW2n+m40yN9ukqdvAX9ac/EIgOapcAAAAASUVORK5CYII="> + <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" /> <meta property="og:url" content="https://my-netdata.io/infographic.html" /> <meta property="og:type" content="website" /> <meta property="og:title" content="netdata infographic" /> <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." 
/> - <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/25580009/bf7016a4-2e85-11e7-9a7a-b36c57db7b91.png" /> + <meta property="og:image" content="https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png" /> <meta property="og:image:type" content="image/png" /> <meta property="fb:app_id" content="1200089276712916" /> @@ -91,8 +91,8 @@ "toolbar":"", "auto-fit":true, "check-visible-state":false, - "edit":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml", - "url":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml" + "edit":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml", + "url":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml" }; document.getElementById("drawing").dataset.mxgraph = JSON.stringify(opts); </script> diff --git a/web/gui/main.css b/web/gui/main.css index 2ddb776e..b6ba9591 100644 --- a/web/gui/main.css +++ b/web/gui/main.css @@ -151,12 +151,6 @@ body.modal-open { /*width: 220px;*/ } -/* -.affix-top { - width: 220px; -} -*/ - .dashboard-sidebar { max-height: calc(100% - 70px) !important; overflow-y: auto; @@ -168,12 +162,6 @@ body.modal-open { position: static; } -@media (min-width: 768px) { - .dashboard-sidebar { - padding-left: 20px; - } -} - /* First level of nav */ .dashboard-sidenav { margin-top: 20px; @@ -353,146 +341,6 @@ body.modal-open { user-select: none; } -@media print { - body { - overflow: visible !important; - -webkit-print-color-adjust: exact; - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .dashboard-section { - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .dashboard-subsection { - page-break-before: avoid; - page-break-after: auto; - page-break-inside: auto; - } - - .charts-body { - padding-left: 0%; - padding-right: 0%; - display: block; - page-break-inside: auto; - page-break-before: auto; - page-break-after: auto; - } - - .back-to-top, - .dashboard-theme-toggle { - display: block; - } -} - -@media (min-width: 768px) { - .charts-body { - padding-left: 0%; - padding-right: 0%; - } - - .back-to-top, - .dashboard-theme-toggle { - display: block; - } -} - -/* Show and affix the side nav when space allows it */ -@media (min-width: 992px) { - .container { - padding-left: 0% !important; - } - - .charts-body { - width: calc(100% - 213px) !important; - padding-left: 1% !important; - padding-right: 0% !important; - } - - .sidebar-body { - display: inline-block !important; - width: 213px !important; - } - - .dashboard-sidebar .nav > .active > ul { - display: block; - } - - /* Widen the fixed sidebar */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 213px !important; - } - - .dashboard-sidebar.affix { - position: fixed; /* Undo the static from mobile first approach */ - top: 20px; - } - - .dashboard-sidebar.affix-bottom { - position: absolute; /* Undo the static from mobile first approach */ - } - - .dashboard-sidebar.affix-bottom .dashboard-sidenav, - .dashboard-sidebar.affix .dashboard-sidenav { - margin-top: 0; - margin-bottom: 0; - } -} - -@media (min-width: 1200px) { - .container { - padding-left: 2% !important; - } - - .charts-body { - width: calc(100% - 233px) !important; - padding-left: 1% !important; - padding-right: 1% !important; - } - - .sidebar-body { - display: inline-block !important; - 
width: 233px !important; - } - - /* Widen the fixed sidebar again */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 233px !important; - } -} - -@media (min-width: 1360px) { - .container { - padding-left: 3% !important; - } - - .charts-body { - width: calc(100% - 263px) !important; - padding-left: 1% !important; - padding-right: 2% !important; - } - - .sidebar-body { - display: inline-block !important; - width: 263px !important; - } - - /* Widen the fixed sidebar again */ - .dashboard-sidebar.affix, - .dashboard-sidebar.affix-top, - .dashboard-sidebar.affix-bottom { - width: 263px !important; - } -} - .action-button { position: relative; display: inline-block; @@ -664,8 +512,23 @@ body.modal-open { right: 19px; } +#myNetdataDropdownParent { + float: left; +} + #hostname { font-size: 18px; + overflow: hidden; + text-overflow: ellipsis; + max-width: 220px; + } + + #hostnametext { + white-space: pre; + float: left; + text-overflow: ellipsis; + overflow: hidden; + max-width: 160px; } .sign-in-btn { @@ -718,3 +581,180 @@ body.modal-open { .beta { color:#FFCC00; } + + +@media (min-width: 1400px) { + #hostname { + max-width: 600px !important; + } + + #hostnametext { + max-width: 540px !important; + } +} + +@media (min-width: 1360px) { + .container { + padding-left: 3% !important; + } + + #hostname { + max-width: 280px !important; + } + + #hostnametext { + max-width: 220px !important; + } + + .charts-body { + width: calc(100% - 263px) !important; + padding-left: 1% !important; + padding-right: 2% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 263px !important; + } + + /* Widen the fixed sidebar again */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 263px !important; + } +} + +@media (min-width: 1200px) { + #hostname { + max-width: 100px; + } + + #hostnametext { + max-width: 40px; + } + .container { + padding-left: 2% !important; + } + + + .charts-body { + width: calc(100% - 233px) !important; + padding-left: 1% !important; + padding-right: 1% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 233px !important; + } + + /* Widen the fixed sidebar again */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 233px !important; + } +} + +@media (min-width: 992px) { + .container { + padding-left: 0% !important; + } + + .charts-body { + width: calc(100% - 213px) !important; + padding-left: 1% !important; + padding-right: 0% !important; + } + + .sidebar-body { + display: inline-block !important; + width: 213px !important; + } + + .dashboard-sidebar .nav > .active > ul { + display: block; + } + + /* Widen the fixed sidebar */ + .dashboard-sidebar.affix, + .dashboard-sidebar.affix-top, + .dashboard-sidebar.affix-bottom { + width: 213px !important; + } + + .dashboard-sidebar.affix { + position: fixed; /* Undo the static from mobile first approach */ + top: 20px; + } + + .dashboard-sidebar.affix-bottom { + position: absolute; /* Undo the static from mobile first approach */ + } + + .dashboard-sidebar.affix-bottom .dashboard-sidenav, + .dashboard-sidebar.affix .dashboard-sidenav { + margin-top: 0; + margin-bottom: 0; + } +} + +@media (min-width: 860px) { + .dashboard-sidebar { + padding-left: 20px; + } + +} + +@media (min-width: 768px) { + .dashboard-sidebar { + padding-left: 20px; + } + + .charts-body { + padding-left: 0%; + padding-right: 0%; + } + + .back-to-top, + 
.dashboard-theme-toggle { + display: block; + } +} + +@media print { + body { + overflow: visible !important; + -webkit-print-color-adjust: exact; + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + .dashboard-section { + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + .dashboard-subsection { + page-break-before: avoid; + page-break-after: auto; + page-break-inside: auto; + } + + .charts-body { + padding-left: 0%; + padding-right: 0%; + display: block; + page-break-inside: auto; + page-break-before: auto; + page-break-after: auto; + } + + .back-to-top, + .dashboard-theme-toggle { + display: block; + } +} diff --git a/web/gui/main.js b/web/gui/main.js index 65c4d4a8..1214eba6 100644 --- a/web/gui/main.js +++ b/web/gui/main.js @@ -704,11 +704,11 @@ function restrictMyNetdataMenu() { </div>`); } -function openAuthenticatedUrl(url) { +function openAuthenticatedUrl(url) { if (isSignedIn()) { window.open(url); } else { - window.open(`${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}`); + window.open(`${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}&redirectUrl=${encodeURIComponent(window.location.origin + "/" + url)}`); } } @@ -1775,8 +1775,6 @@ function renderPage(menus, data) { if (urlOptions.mode === 'print') { chtml += '</div>'; } - - // console.log(' \------- ' + chart.id + ' (' + chart.priority + '): ' + chart.context + ' height: ' + menus[menu].submenus[submenu].height); } head += '</div>'; @@ -2747,7 +2745,7 @@ function initializeDynamicDashboardWithData(data) { } // update the dashboard hostname - document.getElementById('hostname').innerHTML = options.hostname + ((netdataSnapshotData !== null) ? ' (snap)' : '').toString() + ' <strong class="caret">'; + document.getElementById('hostname').innerHTML = '<span id="hostnametext">' + options.hostname + ((netdataSnapshotData !== null) ? 
' (snap)' : '').toString() + '</span> <strong class="caret">'; document.getElementById('hostname').href = NETDATA.serverDefault; document.getElementById('netdataVersion').innerHTML = options.version; @@ -4899,6 +4897,9 @@ function handleSignInMessage(e) { cloudToken = e.data.token; netdataRegistryCallback(registryAgents); + if (e.data.redirectUrl) { + window.location.replace(e.data.redirectUrl); + } } function handleSignOutMessage(e) { diff --git a/web/gui/src/dashboard.js/main.js b/web/gui/src/dashboard.js/main.js index 13f3b4c7..564ee7d4 100644 --- a/web/gui/src/dashboard.js/main.js +++ b/web/gui/src/dashboard.js/main.js @@ -3071,10 +3071,10 @@ let chartState = function (element) { }; this.chartDataUniqueID = function () { - return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(); + return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(true); }; - this.chartURLOptions = function () { + this.chartURLOptions = function (isForUniqueId) { let ret = ''; if (this.override_options !== null) { @@ -3089,7 +3089,9 @@ let chartState = function (element) { ret += '%7C' + 'jsonwrap'; - if (NETDATA.options.current.eliminate_zero_dimensions) { + // always add `nonzero` when it's used to create a chartDataUniqueID + // we cannot just remove `nonzero` because of backwards compatibility with old snapshots + if (isForUniqueId || NETDATA.options.current.eliminate_zero_dimensions) { ret += '%7C' + 'nonzero'; } diff --git a/web/server/README.md b/web/server/README.md index df29f331..173e8959 100644 --- a/web/server/README.md +++ b/web/server/README.md @@ -59,42 +59,43 @@ The API requests are serviced as follows: ### Enabling TLS support +Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data between a slave and its master, via the TLS 1.2 protocol. -Netdata since version 1.16 supports encrypted HTTP connections to the web server and encryption of the data stream between a slave and a master. -Inbound unix socket connections are unaffected, regardless of the SSL settings. -To enable SSL, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`: +Inbound unix socket connections are unaffected, regardless of the TLS settings. +??? info "Differences in TLS and SSL terminology" + While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`. -``` +To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`: + +``` conf [web] ssl key = /etc/netdata/ssl/key.pem ssl certificate = /etc/netdata/ssl/cert.pem ``` -Both files must be readable by the netdata user. If any of the two files does not exist or is unreadable, Netdata falls back to HTTP. - -For a master/slave connection, only the master needs these settings. +Both files must be readable by the `netdata` user. If either of these files does not exist or is unreadable, Netdata will fall back to HTTP. For a master/slave connection, only the master needs these settings.
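+Since an unreadable key or certificate silently downgrades the server to HTTP, it is worth confirming the permissions explicitly; a minimal sketch, assuming the default paths above and a `netdata` user and group (adjust both to your installation):
+
+``` bash
+# illustrative only: make the key and certificate readable by the netdata user
+chown netdata:netdata /etc/netdata/ssl/key.pem /etc/netdata/ssl/cert.pem
+chmod 0640 /etc/netdata/ssl/key.pem /etc/netdata/ssl/cert.pem
+```
+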
For test purposes, you can generate self-signed certificates with the following command: -``` +``` bash $ openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem ``` -TIP: If you use 4096 bits for the key and the certificate, netdata will need more CPU to process the whole communication. -rsa4096 can be until 4 times slower than rsa2048, so we recommend using 2048 bits. You can verify the difference by running - -``` -$ openssl speed rsa2048 rsa4096 -``` +!!! note + If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication. `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. You can verify the difference by running: + + ``` + $ openssl speed rsa2048 rsa4096 + ``` -#### SSL enforcement +#### TLS/SSL enforcement When the certificates are defined and unless any other options are provided, a Netdata server will: + - Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, netdata.conf and badges. - Allow incoming slave connections to use both unencrypted and encrypted communications for streaming. -To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. -At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming. +To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming. SSL setting | HTTP requests | HTTPS requests | Unencrypted Streams | Encrypted Streams :------:|:-----:|:-----:|:-----:|:-------- @@ -109,12 +110,29 @@ Example: bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force ``` -For information how to configure the slaves to use TLS, check [securing the communication](../../streaming#securing-the-communication) in the streaming documentation. -You will find there additional details on the expected behavior for client and server nodes, when their respective SSL options are enabled. +For information on how to configure the slaves to use TLS, check [securing the communication](../../streaming#securing-streaming-communications) in the streaming documentation. There you will find additional details on the expected behavior for client and server nodes, when their respective TLS options are enabled. + +When you define different SSL settings for different ports of a Netdata agent, Netdata will apply the behavior specified for each port. For example, using the configuration line below: + +``` +[web] + bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force *:20000=netdata.conf^SSL=optional *:20001=dashboard|registry +``` + +Netdata will: + +- Force all HTTP requests to the default port to be redirected to HTTPS (same port). +- Refuse unencrypted streaming connections from slaves on the default port. +- Allow both HTTP and HTTPS requests to port 20000 for netdata.conf. +- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
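+You can check each port's behavior with plain `curl` requests; a quick sketch against the example configuration above (`myserver` and the ports are placeholders, and `-k` skips certificate verification, e.g. for the self-signed certificates shown earlier):
+
+``` bash
+# the default port should answer plain HTTP with a redirect to HTTPS
+curl -i http://myserver:19999/
+
+# port 20000 should serve netdata.conf over both HTTP and HTTPS
+curl http://myserver:20000/netdata.conf
+curl -k https://myserver:20000/netdata.conf
+```
+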
+ +#### TLS/SSL errors + +When you start using Netdata with TLS, you may find errors in the Netdata log, which is stored at `/var/log/netdata/error.log` by default. -#### SSL error +Most of the time, these errors are due to incompatibilities between your browser's options related to TLS/SSL protocols and Netdata's internal configuration. The most common error is `error:00000006:lib(0):func(0):EVP lib`. -It is possible that when you start to use the Netdata with SSL some erros will be register in the logs, this happens due possible incompatibilities between the browser options related to SSL like Ciphers and TLS/SSL version and the Netdata internal configuration. The most common error would be `error:00000006:lib(0):func(0):EVP lib`. In a near future the Netdata will allow our users to change the internal configuration to avoid errors like this, but until there we are setting the most common and safety options to the communication. +In the near future, Netdata will allow our users to change the internal configuration to avoid similar errors. Until then, we're recommending only the most common and safe encryption protocols, which you can find above. ### Access lists diff --git a/web/server/web_client.c b/web/server/web_client.c index bd275f5e..2da6c1de 100644 --- a/web/server/web_client.c +++ b/web/server/web_client.c @@ -16,8 +16,8 @@ inline int web_client_permission_denied(struct web_client *w) { w->response.data->contenttype = CT_TEXT_PLAIN; buffer_flush(w->response.data); buffer_strcat(w->response.data, "You are not allowed to access this resource."); - w->response.code = 403; - return 403; + w->response.code = HTTP_RESP_FORBIDDEN; + return HTTP_RESP_FORBIDDEN; } static inline int web_client_crock_socket(struct web_client *w) { @@ -337,7 +337,7 @@ static inline int access_to_file_is_not_permitted(struct web_client *w, const ch w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Access to file is not permitted: "); buffer_strcat_htmlescape(w->response.data, filename); - return 403; + return HTTP_RESP_FORBIDDEN; } int mysendfile(struct web_client *w, char *filename) { @@ -357,7 +357,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_sprintf(w->response.data, "Filename contains invalid characters: "); buffer_strcat_htmlescape(w->response.data, filename); - return 400; + return HTTP_RESP_BAD_REQUEST; } } @@ -367,7 +367,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Relative filenames are not supported: "); buffer_strcat_htmlescape(w->response.data, filename); - return 400; + return HTTP_RESP_BAD_REQUEST; } // find the physical file on disk @@ -383,7 +383,7 @@ int mysendfile(struct web_client *w, char *filename) { w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "File does not exist, or is not accessible: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; + return HTTP_RESP_NOT_FOUND; } if ((statbuf.st_mode & S_IFMT) == S_IFDIR) { @@ -422,14 +422,14 @@ int mysendfile(struct web_client *w, char *filename) { buffer_sprintf(w->response.header, "Location: /%s\r\n", filename); buffer_strcat(w->response.data, "File is currently busy, please try again later: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 307; + return HTTP_RESP_REDIR_TEMP; } else { error("%llu: Cannot open file '%s'.", w->id, webfilename); w->response.data->contenttype = 
CT_TEXT_HTML; buffer_strcat(w->response.data, "Cannot open file: "); buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; + return HTTP_RESP_NOT_FOUND; } } @@ -451,7 +451,7 @@ int mysendfile(struct web_client *w, char *filename) { #endif /* __APPLE__ */ buffer_cacheable(w->response.data); - return 200; + return HTTP_RESP_OK; } @@ -570,7 +570,7 @@ static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char //if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) { // buffer_flush(w->response.data); // buffer_strcat(w->response.data, "This host does not maintain a database"); - // return 400; + // return HTTP_RESP_BAD_REQUEST; //} return func(host, w, url); @@ -603,13 +603,13 @@ int web_client_api_request(RRDHOST *host, struct web_client *w, char *url) w->response.data->contenttype = CT_TEXT_HTML; buffer_strcat(w->response.data, "Unsupported API version: "); buffer_strcat_htmlescape(w->response.data, tok); - return 404; + return HTTP_RESP_NOT_FOUND; } } else { buffer_flush(w->response.data); buffer_sprintf(w->response.data, "Which API version?"); - return 400; + return HTTP_RESP_BAD_REQUEST; } } @@ -687,25 +687,25 @@ const char *web_content_type_to_string(uint8_t contenttype) { const char *web_response_code_to_string(int code) { switch(code) { - case 200: + case HTTP_RESP_OK: return "OK"; - case 301: + case HTTP_RESP_MOVED_PERM: return "Moved Permanently"; - case 307: + case HTTP_RESP_REDIR_TEMP: return "Temporary Redirect"; - case 400: + case HTTP_RESP_BAD_REQUEST: return "Bad Request"; - case 403: + case HTTP_RESP_FORBIDDEN: return "Forbidden"; - case 404: + case HTTP_RESP_NOT_FOUND: return "Not Found"; - case 412: + case HTTP_RESP_PRECOND_FAIL: return "Preconditions Failed"; default: @@ -772,7 +772,6 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u // terminate the value *ve = '\0'; - // fprintf(stderr, "HEADER: '%s' = '%s'\n", s, v); uint32_t hash = simple_uhash(s); if(hash == hash_origin && !strcasecmp(s, "Origin")) @@ -812,65 +811,35 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u return ve; } -// http_request_validate() -// returns: -// = 0 : all good, process the request -// > 0 : request is not supported -// < 0 : request is incomplete - wait for more data - -typedef enum { - HTTP_VALIDATION_OK, - HTTP_VALIDATION_NOT_SUPPORTED, -#ifdef ENABLE_HTTPS - HTTP_VALIDATION_INCOMPLETE, - HTTP_VALIDATION_REDIRECT -#else - HTTP_VALIDATION_INCOMPLETE -#endif -} HTTP_VALIDATION; - -static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { - char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL; - - size_t last_pos = w->header_parse_last_size; - if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n - else last_pos = 0; - - w->header_parse_tries++; - w->header_parse_last_size = buffer_strlen(w->response.data); - - if(w->header_parse_tries > 1) { - if(w->header_parse_last_size < last_pos) - last_pos = 0; - - if(strstr(&s[last_pos], "\r\n\r\n") == NULL) { - if(w->header_parse_tries > 10) { - info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data)); - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - web_client_disable_wait_receive(w); - return HTTP_VALIDATION_NOT_SUPPORTED; - } - - return HTTP_VALIDATION_INCOMPLETE; - } - } - +/** + * Valid Method + * + * Netdata accepts only three methods; one of them (STREAM) is an internal
+ * method.
+ *
+ * @param w is the structure with the client request
+ * @param s is the start string to parse
+ *
+ * @return it returns the next address to parse if the method is valid, and NULL otherwise.
+ */
+static inline char *web_client_valid_method(struct web_client *w, char *s) {
     // is this a valid request?
     if(!strncmp(s, "GET ", 4)) {
-        encoded_url = s = &s[4];
+        s = &s[4];
         w->mode = WEB_CLIENT_MODE_NORMAL;
     }
     else if(!strncmp(s, "OPTIONS ", 8)) {
-        encoded_url = s = &s[8];
+        s = &s[8];
         w->mode = WEB_CLIENT_MODE_OPTIONS;
     }
     else if(!strncmp(s, "STREAM ", 7)) {
+        s = &s[7];
+
 #ifdef ENABLE_HTTPS
-        if ( (w->ssl.flags) && (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE)){
+        if (w->ssl.flags && web_client_is_using_ssl_force(w)){
             w->header_parse_tries = 0;
             w->header_parse_last_size = 0;
             web_client_disable_wait_receive(w);
+
             char hostname[256];
             char *copyme = strstr(s,"hostname=");
             if ( copyme ){
@@ -891,29 +860,150 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
                 hostname[13] = 0x00;
             }
             error("The server is configured to always use encrypted connections, please enable SSL on the slave with hostname '%s'.",hostname);
-            return HTTP_VALIDATION_NOT_SUPPORTED;
+            s = NULL;
         }
 #endif

-        encoded_url = s = &s[7];
         w->mode = WEB_CLIENT_MODE_STREAM;
     }
     else {
+        s = NULL;
+    }
+
+    return s;
+}
+
+/**
+ * Set Path Query
+ *
+ * Set the pointers to the path and the query string according to the input.
+ *
+ * @param w is the structure with the client request
+ * @param s is the first address of the string.
+ * @param ptr is the address of the separator.
+ */
+static void web_client_set_path_query(struct web_client *w, char *s, char *ptr) {
+    w->url_path_length = (size_t)(ptr - s);
+
+    w->url_search_path = ptr;
+}
+
+/**
+ * Split path query
+ *
+ * Separate the path from the query string.
+ *
+ * @param w is the structure with the client request
+ * @param s is the string to parse
+ */
+void web_client_split_path_query(struct web_client *w, char *s) {
+    // We assume here that the separator character ('?') is not encoded.
+    char *ptr = strchr(s, '?');
+    if(ptr) {
+        w->separator = '?';
+        web_client_set_path_query(w, s, ptr);
+        return;
+    }
+
+    // Test the second possibility: the URL is completely encoded by the user.
+    // We do not use strcasestr, because it is faster to look for "%3f" and
+    // compare the next character.
+    // Tests with "encodeURI(uri);" (https://www.w3schools.com/jsref/jsref_encodeuri.asp)
+    // run on July 1st, 2019 showed that URLs won't have '?', '=' and '&' encoded,
+    // but we keep this check anyway, because users can use their own encoders
+    // that do not follow this rule.
+    char *moveme = s;
+    while (moveme) {
+        ptr = strchr(moveme, '%');
+        if(ptr) {
+            char *test = (ptr+1);
+            if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) {
+                w->separator = *ptr;
+                web_client_set_path_query(w, s, ptr);
+                return;
+            }
+            ptr++;
+        }
+
+        moveme = ptr;
+    }
+
+    w->separator = 0x00;
+    w->url_path_length = strlen(s);
+    w->url_search_path = NULL;
+}
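To make the splitting rule above concrete: for a path such as "/api/v1/data?chart=system.cpu" the function finds the literal '?', while for "/api/v1/data%3Fchart=system.cpu" it falls back to the URL-encoded "%3f"/"%3F" form and records '%' as the separator. The following standalone sketch mirrors that logic; the function name and the result struct are illustrative only, not part of the netdata sources:

#include <stdio.h>
#include <string.h>
#include <strings.h>

// hypothetical result holder for this sketch
struct split_result {
    size_t path_length;   // bytes before the separator
    char separator;       // '?' or '%', 0x00 when there is no query string
    const char *query;    // points at the separator, NULL when absent
};

static void split_path_query(const char *s, struct split_result *r) {
    const char *ptr = strchr(s, '?');                    // plain separator first
    if(!ptr) {                                           // then the encoded form
        for(ptr = strchr(s, '%'); ptr; ptr = strchr(ptr + 1, '%'))
            if(!strncasecmp(ptr + 1, "3f", 2))
                break;
    }

    if(ptr) {
        r->path_length = (size_t)(ptr - s);
        r->separator   = *ptr;
        r->query       = ptr;
    }
    else {
        r->path_length = strlen(s);
        r->separator   = 0x00;
        r->query       = NULL;
    }
}

int main(void) {
    struct split_result r;
    split_path_query("/api/v1/data?chart=system.cpu", &r);
    printf("path bytes: %zu, separator: '%c'\n", r.path_length, r.separator);
    split_path_query("/api/v1/data%3Fchart=system.cpu", &r);
    printf("path bytes: %zu, separator: '%c'\n", r.path_length, r.separator);
    return 0;
}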
+
+/**
+ * Request validate
+ *
+ * @param w is the structure with the client request
+ *
+ * @return It returns HTTP_VALIDATION_OK on success and another code from
+ * the enum HTTP_VALIDATION otherwise.
+ */
+static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
+    char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL;
+
+    size_t last_pos = w->header_parse_last_size;
+
+    w->header_parse_tries++;
+    w->header_parse_last_size = buffer_strlen(w->response.data);
+
+    int is_it_valid;
+    if(w->header_parse_tries > 1) {
+        if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n
+        else last_pos = 0;
+
+        if(w->header_parse_last_size < last_pos)
+            last_pos = 0;
+
+        is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+        if(!is_it_valid) {
+            if(w->header_parse_tries > 10) {
+                info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data));
+                w->header_parse_tries = 0;
+                w->header_parse_last_size = 0;
+                web_client_disable_wait_receive(w);
+                return HTTP_VALIDATION_NOT_SUPPORTED;
+            }
+
+            return HTTP_VALIDATION_INCOMPLETE;
+        }
+
+        is_it_valid = 1;
+    } else {
+        last_pos = w->header_parse_last_size;
+        is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+    }
+
+    s = web_client_valid_method(w, s);
+    if (!s) {
         w->header_parse_tries = 0;
         w->header_parse_last_size = 0;
         web_client_disable_wait_receive(w);
+        return HTTP_VALIDATION_NOT_SUPPORTED;
+    } else if (!is_it_valid) {
+        // Invalid request: there is more data after the end of the message.
+        char *check = strstr((char *)buffer_tostring(w->response.data), "\r\n\r\n");
+        if(check) {
+            check += 4;
+            if (*check) {
+                w->header_parse_tries = 0;
+                w->header_parse_last_size = 0;
+                web_client_disable_wait_receive(w);
+                return HTTP_VALIDATION_NOT_SUPPORTED;
+            }
+        }
+
+        web_client_enable_wait_receive(w);
+        return HTTP_VALIDATION_INCOMPLETE;
     }
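The completeness test delegated to url_is_request_complete() (a new helper in url.c, whose body is outside this diff) boils down to locating the "\r\n\r\n" header terminator; the rewind of last_pos by 4 bytes above exists so a terminator split across two reads is still found. A simplified sketch of that idea, under the assumption that the buffered bytes are NUL-terminated:

#include <string.h>

// A request is "complete" once the buffered bytes contain the "\r\n\r\n"
// header terminator. 'begin' is where the previous scan stopped, rewound
// by up to 4 bytes so a terminator straddling two reads is not missed.
static int request_seems_complete(const char *begin) {
    return strstr(begin, "\r\n\r\n") != NULL;
}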
-    // find the SPACE + "HTTP/"
-    while(*s) {
-        // find the next space
-        while (*s && *s != ' ') s++;
+    // After the method we have the path and the query string together
+    encoded_url = s;

-        // is it SPACE + "HTTP/" ?
-        if(*s && !strncmp(s, " HTTP/", 6)) break;
-        else s++;
-    }
+    // we search for the position of " HTTP/", because it terminates the user request
+    s = url_find_protocol(s);

     // incomplete requests
     if(unlikely(!*s)) {
@@ -924,6 +1014,10 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
     // we have the end of encoded_url - remember it
     char *ue = s;

+    // Variables used to map the variables of the query string, if it is present
+    int total_variables;
+    char *ptr_variables[WEB_FIELDS_MAX];
+
     // make sure we have complete request
     // complete requests contain: \r\n\r\n
     while(*s) {
@@ -941,15 +1035,41 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
     // a valid complete HTTP request found

     *ue = '\0';
-    url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+    if(w->mode != WEB_CLIENT_MODE_NORMAL) {
+        if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+            return HTTP_VALIDATION_MALFORMED_URL;
+    } else {
+        web_client_split_path_query(w, encoded_url);
+
+        if (w->separator) {
+            *w->url_search_path = 0x00;
+        }
+
+        if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+            return HTTP_VALIDATION_MALFORMED_URL;
+
+        if (w->separator) {
+            *w->url_search_path = w->separator;
+
+            char *from = (encoded_url + w->url_path_length);
+            total_variables = url_map_query_string(ptr_variables, from);
+
+            if (url_parse_query_string(w->decoded_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1, ptr_variables, total_variables)) {
+                return HTTP_VALIDATION_MALFORMED_URL;
+            }
+        }
+    }
     *ue = ' ';
-    
+
     // copy the URL - we are going to overwrite parts of it
     // TODO -- ideally we should avoid copying buffers around
     strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+    if (w->separator) {
+        *w->url_search_path = 0x00;
+    }
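url_map_query_string() and url_parse_query_string() are also new url.c helpers whose bodies are outside this diff; from the call sites above, the mapping step gathers one pointer per query-string field into ptr_variables[], and the parsing step decodes them into w->decoded_query_string. A rough standalone sketch of the mapping idea (the function name is hypothetical, and the WEB_FIELDS_MAX value is assumed for this sketch):

#include <stdio.h>
#include <string.h>

#define WEB_FIELDS_MAX 200   // assumed bound, mirroring ptr_variables[WEB_FIELDS_MAX]

// Collect a pointer to the start of every '&'-separated field; 'from' points
// at the separator ('?'), as in the hunk above.
static int map_query_string(char **out, char *from) {
    int count = 0;
    char *ptr = from;
    while (ptr && count < WEB_FIELDS_MAX) {
        out[count++] = ptr;
        ptr = strchr(ptr + 1, '&');
    }
    return count;
}

int main(void) {
    char qs[] = "?chart=system.cpu&after=-60&points=5";
    char *fields[WEB_FIELDS_MAX];
    int n = map_query_string(fields, qs);
    for (int i = 0; i < n; i++)
        printf("field %d starts at: %.20s\n", i, fields[i]);
    return 0;
}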

 #ifdef ENABLE_HTTPS
     if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
-        if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (netdata_use_ssl_on_http & NETDATA_SSL_FORCE) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {
+        if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {
             w->header_parse_tries = 0;
             w->header_parse_last_size = 0;
             web_client_disable_wait_receive(w);
@@ -997,7 +1117,7 @@ static inline ssize_t web_client_send_data(struct web_client *w,const void *buf,
 }

 static inline void web_client_send_http_header(struct web_client *w) {
-    if(unlikely(w->response.code != 200))
+    if(unlikely(w->response.code != HTTP_RESP_OK))
         buffer_no_cacheable(w->response.data);

     // set a proper expiration date, if not already set
@@ -1027,7 +1147,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
     }

     char headerbegin[8328];
-    if (w->response.code == 301) {
+    if (w->response.code == HTTP_RESP_MOVED_PERM) {
         memcpy(headerbegin,"\r\nLocation: https://",20);
         size_t headerlength = strlen(w->host);
         memcpy(&headerbegin[20],w->host,headerlength);
@@ -1210,7 +1330,7 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch
     if(host != localhost) {
         buffer_flush(w->response.data);
         buffer_strcat(w->response.data, "Nesting of hosts is not allowed.");
-        return 400;
+        return HTTP_RESP_BAD_REQUEST;
     }

     char *tok = mystrsep(&url, "/");
@@ -1234,7 +1354,7 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch
     w->response.data->contenttype = CT_TEXT_HTML;
     buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
     buffer_strcat_htmlescape(w->response.data, tok?tok:"");
-    return 404;
+    return HTTP_RESP_NOT_FOUND;
 }

 static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) {
@@ -1279,7 +1399,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
             w->response.data->contenttype = CT_TEXT_PLAIN;
             buffer_flush(w->response.data);
             config_generate(w->response.data, 0);
-            return 200;
+            return HTTP_RESP_OK;
         }
 #ifdef NETDATA_INTERNAL_CHECKS
         else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
@@ -1296,7 +1416,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
             error("web request to exit received.");
             netdata_cleanup_and_exit(0);
-            return 200;
+            return HTTP_RESP_OK;
         }
         else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
             if(unlikely(!web_client_can_access_netdataconf(w)))
@@ -1317,7 +1437,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
                 buffer_strcat(w->response.data, "Chart is not found: ");
                 buffer_strcat_htmlescape(w->response.data, tok);
                 debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
-                return 404;
+                return HTTP_RESP_NOT_FOUND;
             }

             debug_flags |= D_RRD_STATS;
@@ -1331,12 +1451,12 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
                 buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
                 buffer_strcat_htmlescape(w->response.data, tok);
                 debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
-                return 200;
+                return HTTP_RESP_OK;
             }

             buffer_flush(w->response.data);
             buffer_strcat(w->response.data, "debug which chart?\r\n");
-            return 400;
+            return HTTP_RESP_BAD_REQUEST;
         }
         else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
             if(unlikely(!web_client_can_access_netdataconf(w)))
@@ -1350,7 +1470,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
             // just leave the buffer as is
             // it will be copied back to the client
-            return 200;
+            return HTTP_RESP_OK;
         }
 #endif /* NETDATA_INTERNAL_CHECKS */
     }
@@ -1395,7 +1515,7 @@ void web_client_process_request(struct web_client *w) {
                     w->response.data->contenttype = CT_TEXT_PLAIN;
                     buffer_flush(w->response.data);
                     buffer_strcat(w->response.data, "OK");
-                    w->response.code = 200;
+                    w->response.code = HTTP_RESP_OK;
                     break;

                 case WEB_CLIENT_MODE_FILECOPY:
@@ -1424,7 +1544,7 @@ void web_client_process_request(struct web_client *w) {
                 buffer_flush(w->response.data);
                 buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len);
-                w->response.code = 400;
+                w->response.code = HTTP_RESP_BAD_REQUEST;
             }
             else {
                 // wait for more data
@@ -1437,16 +1557,23 @@ void web_client_process_request(struct web_client *w) {
             buffer_flush(w->response.data);
             w->response.data->contenttype = CT_TEXT_HTML;
             buffer_strcat(w->response.data, "<!DOCTYPE html><!-- SPDX-License-Identifier: GPL-3.0-or-later --><html><body onload=\"window.location.href ='https://'+ window.location.hostname + ':' + window.location.port + window.location.pathname\">Redirecting to a secure connection. In case your browser does not support redirection, please click <a onclick=\"window.location.href ='https://'+ window.location.hostname + ':' + window.location.port + window.location.pathname\">here</a>.</body></html>");
-            w->response.code = 301;
+            w->response.code = HTTP_RESP_MOVED_PERM;
             break;
         }
 #endif
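Two pieces cooperate in the plain-HTTP-to-HTTPS redirect above: the HTTP_VALIDATION_REDIRECT case queues the HTML fallback for browsers, and web_client_send_http_header() (earlier in this diff) prepends a "\r\nLocation: https://<host>..." header whenever the response code is HTTP_RESP_MOVED_PERM. A compact sketch of that header construction, using snprintf instead of the memcpy() sequence (the helper name is hypothetical):

#include <stdio.h>

// Builds the Location fragment appended to the status line when the
// response code is HTTP_RESP_MOVED_PERM; returns the bytes written.
static int build_location_header(char *out, size_t outlen, const char *host) {
    return snprintf(out, outlen, "\r\nLocation: https://%s", host);
}

int main(void) {
    char headerbegin[8328];  // same buffer size the web client uses
    int n = build_location_header(headerbegin, sizeof(headerbegin), "my-netdata.example:19999");
    printf("%d bytes:%s\n", n, headerbegin);
    return 0;
}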
+        case HTTP_VALIDATION_MALFORMED_URL:
+            debug(D_WEB_CLIENT_ACCESS, "%llu: URL parsing failed (malformed URL). Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+            buffer_flush(w->response.data);
+            buffer_strcat(w->response.data, "URL not valid. I don't understand you...\r\n");
+            w->response.code = HTTP_RESP_BAD_REQUEST;
+            break;
         case HTTP_VALIDATION_NOT_SUPPORTED:
             debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer);

             buffer_flush(w->response.data);
             buffer_strcat(w->response.data, "I don't understand you...\r\n");
-            w->response.code = 400;
+            w->response.code = HTTP_RESP_BAD_REQUEST;
             break;
     }
 }
diff --git a/web/server/web_client.h b/web/server/web_client.h
index 0a57e8d8..7cab46fc 100644
--- a/web/server/web_client.h
+++ b/web/server/web_client.h
@@ -11,6 +11,21 @@ extern int web_enable_gzip, web_gzip_strategy;
 #endif /* NETDATA_WITH_ZLIB */

+// HTTP_CODES 2XX Success
+#define HTTP_RESP_OK 200
+
+// HTTP_CODES 3XX Redirections
+#define HTTP_RESP_MOVED_PERM 301
+#define HTTP_RESP_REDIR_TEMP 307
+#define HTTP_RESP_REDIR_PERM 308
+
+// HTTP_CODES 4XX Client Errors
+#define HTTP_RESP_BAD_REQUEST 400
+#define HTTP_RESP_FORBIDDEN 403
+#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_PRECOND_FAIL 412
+
+
 extern int respect_web_browser_do_not_track_policy;
 extern char *web_x_frame_options;

@@ -21,6 +36,18 @@ typedef enum web_client_mode {
     WEB_CLIENT_MODE_STREAM = 3
 } WEB_CLIENT_MODE;

+typedef enum {
+    HTTP_VALIDATION_OK,
+    HTTP_VALIDATION_NOT_SUPPORTED,
+    HTTP_VALIDATION_MALFORMED_URL,
+#ifdef ENABLE_HTTPS
+    HTTP_VALIDATION_INCOMPLETE,
+    HTTP_VALIDATION_REDIRECT
+#else
+    HTTP_VALIDATION_INCOMPLETE
+#endif
+} HTTP_VALIDATION;
+
 typedef enum web_client_flags {
     WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead

@@ -128,8 +155,12 @@ struct web_client {
     char client_port[NI_MAXSERV+1];

     char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1];          // we decode the URL in this buffer
+    char decoded_query_string[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the Query String in this buffer
     char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1];               // we keep a copy of the decoded URL here
     char host[256];
+    size_t url_path_length;
+    char separator;              // This value can be either '?' or '%' (when the '?' arrived URL-encoded)
+    char *url_search_path;       // A pointer to the search path sent by the client

     struct timeval tv_in, tv_ready;

@@ -159,6 +190,7 @@ struct web_client {
 #endif
 };

+
 extern uid_t web_files_uid(void);
 extern uid_t web_files_gid(void);
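With the magic numbers replaced by the HTTP_RESP_* constants above, the response-code/reason-phrase mapping lives in a single switch. A quick standalone check of that mapping (the local copy of the switch and its default text are illustrative; the real implementation is web_response_code_to_string() in web_client.c):

#include <stdio.h>

#define HTTP_RESP_OK          200
#define HTTP_RESP_MOVED_PERM  301
#define HTTP_RESP_BAD_REQUEST 400
#define HTTP_RESP_NOT_FOUND   404

// local stand-in for web_response_code_to_string()
static const char *code_to_string(int code) {
    switch(code) {
        case HTTP_RESP_OK:          return "OK";
        case HTTP_RESP_MOVED_PERM:  return "Moved Permanently";
        case HTTP_RESP_BAD_REQUEST: return "Bad Request";
        case HTTP_RESP_NOT_FOUND:   return "Not Found";
        default:                    return "Internal Server Error"; // guessed default text
    }
}

int main(void) {
    const int codes[] = { HTTP_RESP_OK, HTTP_RESP_MOVED_PERM, HTTP_RESP_BAD_REQUEST, HTTP_RESP_NOT_FOUND };
    for (unsigned i = 0; i < sizeof(codes)/sizeof(codes[0]); i++)
        printf("%d -> %s\n", codes[i], code_to_string(codes[i]));
    return 0;
}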