| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2019-05-21 18:55:22 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2019-05-21 18:55:22 +0000 |
| commit | 61d0027904ee9c040985b1642ca228737d616d03 (patch) | |
| tree | c90838c137b4dc4a2de1cff9f2d7be5552d605e7 /packaging | |
| parent | Adding upstream version 1.14.0. (diff) | |
| download | netdata-61d0027904ee9c040985b1642ca228737d616d03.tar.xz netdata-61d0027904ee9c040985b1642ca228737d616d03.zip | |
Adding upstream version 1.15.0. (upstream/1.15.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'packaging')
38 files changed, 3244 insertions, 83 deletions
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile new file mode 100644 index 000000000..98fdce5c9 --- /dev/null +++ b/packaging/docker/Dockerfile @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# author : paulfantom + +# Cross-arch building is achieved by specifying ARCH as a build parameter with `--build-arg` option. +# It is automated in `build.sh` script +ARG ARCH=amd64 +# This image contains preinstalled dependecies +FROM netdata/builder:${ARCH} as builder + +ENV JUDY_VER 1.0.5 + +# Copy source +COPY . /opt/netdata.git +WORKDIR /opt/netdata.git + +# Install from source +RUN chmod +x netdata-installer.sh && ./netdata-installer.sh --dont-wait --dont-start-it + +# files to one directory +RUN mkdir -p /app/usr/sbin/ \ + /app/usr/share \ + /app/usr/libexec \ + /app/usr/lib \ + /app/var/cache \ + /app/var/lib \ + /app/etc && \ + mv /usr/share/netdata /app/usr/share/ && \ + mv /usr/libexec/netdata /app/usr/libexec/ && \ + mv /usr/lib/netdata /app/usr/lib/ && \ + mv /var/cache/netdata /app/var/cache/ && \ + mv /var/lib/netdata /app/var/lib/ && \ + mv /etc/netdata /app/etc/ && \ + mv /usr/sbin/netdata /app/usr/sbin/ && \ + mv /judy-${JUDY_VER} /app/judy-${JUDY_VER} && \ + mv packaging/docker/run.sh /app/usr/sbin/ && \ + chmod +x /app/usr/sbin/run.sh + +##################################################################### +ARG ARCH +# This image contains preinstalled dependecies +FROM netdata/base:${ARCH} + +# Conditional subscribiton to Polyverse's Polymorphic Linux repositories +RUN if [ "$(uname -m)" == "x86_64" ]; then \ + apk update && apk upgrade; \ + curl https://sh.polyverse.io | sh -s install gcxce5byVQbtRz0iwfGkozZwy support+netdata@polyverse.io; \ + if [ $? -eq 0 ]; then \ + apk update && \ + apk upgrade --available --no-cache && \ + sed -in 's/^#//g' /etc/apk/repositories; \ + fi \ + fi + +# Copy files over +RUN mkdir -p /opt/src +COPY --from=builder /app / + +# Configure system +ARG NETDATA_UID=201 +ARG NETDATA_GID=201 +RUN \ + # provide judy installation to base image + apk add make alpine-sdk && \ + cd /judy-${JUDY_VER} && make install && cd / && \ + # Clean the source stuff once judy is installed + rm -rf /judy-${JUDY_VER} && apk del make alpine-sdk && \ + # fping from alpine apk is on a different location. Moving it. 
+ mv /usr/sbin/fping /usr/local/bin/fping && \ + chmod 4755 /usr/local/bin/fping && \ + mkdir -p /var/log/netdata && \ + # Add netdata user + addgroup -g ${NETDATA_GID} -S netdata && \ + adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G netdata netdata && \ + # Apply the permissions as described in + # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories + chown -R root:netdata /etc/netdata && \ + chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \ + chown -R root:netdata /usr/lib/netdata && \ + chown -R root:netdata /usr/libexec/netdata/ && \ + chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \ + chmod 0750 /var/lib/netdata /var/cache/netdata && \ + # Link log files to stdout + ln -sf /dev/stdout /var/log/netdata/access.log && \ + ln -sf /dev/stdout /var/log/netdata/debug.log && \ + ln -sf /dev/stderr /var/log/netdata/error.log + +ENV NETDATA_PORT 19999 +EXPOSE $NETDATA_PORT + +ENTRYPOINT ["/usr/sbin/run.sh"] diff --git a/packaging/docker/README.md b/packaging/docker/README.md new file mode 100644 index 000000000..6ae299f1d --- /dev/null +++ b/packaging/docker/README.md @@ -0,0 +1,186 @@ +# Install netdata with Docker + +> :warning: As of Sep 9th, 2018 we ship [new docker builds](https://github.com/netdata/netdata/pull/3995), running netdata in docker with an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, not a COMMAND directive. Please adapt your execution scripts accordingly. You can find more information about ENTRYPOINT vs COMMAND is presented by goinbigdata [here](http://goinbigdata.com/docker-run-vs-cmd-vs-entrypoint/) and by docker docs [here](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). +> +> Also, the `latest` is now based on alpine, so **`alpine` is not updated any more** and `armv7hf` is now replaced with `armhf` (to comply with https://github.com/multiarch naming), so **`armv7hf` is not updated** either. + +## Limitations + +Running netdata in a container for monitoring the whole host, can limit its capabilities. Some data is not accessible or not as detailed as when running netdata on the host. + +## Package scrambling in runtime (x86_64 only) + +By default on x86_64 architecture our docker images use Polymorphic Polyverse Linux package scrambling. For increased security you can enable rescrambling of packages during runtime. To do this set environment variable `RESCRAMBLE=true` while starting netdata docker container. + +For more information go to [Polyverse site](https://polyverse.io/how-it-works/) + +## Run netdata with docker command + +Quickly start netdata with the docker command line. +Netdata is then available at http://host:19999 + +This is good for an internal network or to quickly analyse a host. 
+ +```bash +docker run -d --name=netdata \ + -p 19999:19999 \ + -v /proc:/host/proc:ro \ + -v /sys:/host/sys:ro \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + --cap-add SYS_PTRACE \ + --security-opt apparmor=unconfined \ + netdata/netdata +``` + +The above can be converted to docker-compose file for ease of management: + +```yaml +version: '3' +services: + netdata: + image: netdata/netdata + hostname: example.com # set to fqdn of host + ports: + - 19999:19999 + cap_add: + - SYS_PTRACE + security_opt: + - apparmor:unconfined + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /var/run/docker.sock:/var/run/docker.sock:ro +``` + +### Docker container names resolution + +If you want to have your container names resolved by netdata it needs to have access to docker group. To achive that just add environment variable `PGID=999` to netdata container, where `999` is a docker group id from your host. This number can be found by running: +```bash +grep docker /etc/group | cut -d ':' -f 3 +``` + +### Pass command line options to Netdata + +Since we use an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, you can provide [netdata daemon command line options](https://docs.netdata.cloud/daemon/#command-line-options) such as the IP address netdata will be running on, using the [command instruction](https://docs.docker.com/engine/reference/builder/#cmd). + +## Install Netdata using Docker Compose with SSL/TLS enabled http proxy + +For a permanent installation on a public server, you should [secure the netdata instance](../../docs/netdata-security.md). This section contains an example of how to install netdata with an SSL reverse proxy and basic authentication. + +You can use use the following docker-compose.yml and Caddyfile files to run netdata with docker. Replace the Domains and email address for [Letsencrypt](https://letsencrypt.org/) before starting. + +### Prerequisites +* [Docker](https://docs.docker.com/install/#server) +* [Docker Compose](https://docs.docker.com/compose/install/) +* Domain configured in DNS pointing to host. + +### Caddyfile + +This file needs to be placed in /opt with name `Caddyfile`. Here you customize your domain and you need to provide your email address to obtain a Letsencrypt certificate. Certificate renewal will happen automatically and will be executed internally by the caddy server. + +``` +netdata.example.org { + proxy / netdata:19999 + tls admin@example.org +} +``` + +### docker-compose.yml + +After setting Caddyfile run this with `docker-compose up -d` to have fully functioning netdata setup behind HTTP reverse proxy. + +```yaml +version: '3' +volumes: + caddy: + +services: + caddy: + image: abiosoft/caddy + ports: + - 80:80 + - 443:443 + volumes: + - /opt/Caddyfile:/etc/Caddyfile + - caddy:/root/.caddy + environment: + ACME_AGREE: 'true' + netdata: + restart: always + hostname: netdata.example.org + image: netdata/netdata + cap_add: + - SYS_PTRACE + security_opt: + - apparmor:unconfined + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /var/run/docker.sock:/var/run/docker.sock:ro +``` + +### Restrict access with basic auth + +You can restrict access by following [official caddy guide](https://caddyserver.com/docs/basicauth) and adding lines to Caddyfile. 
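
For illustration, a minimal sketch of such a Caddyfile addition: a single `basicauth` line inside the site block protects the whole Netdata UI. The username and password below are placeholders, and the directive syntax assumes the Caddy v1 image (`abiosoft/caddy`) used in the docker-compose example above.

```
netdata.example.org {
    proxy / netdata:19999
    tls admin@example.org
    # placeholder credentials - replace with your own
    basicauth / myuser mysecurepassword
}
```

After changing the Caddyfile, restarting the caddy service (for example with `docker-compose restart caddy`) should apply the new authentication settings.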
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fdocker%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() + +## Publish a test image to your own repository + +At netdata we provide multiple ways of testing your docker images using your own repositories. +You may either use the command line tools available or take advantage of our Travis CI infrastructure. + +### Using tools manually from the command line + +The script `packaging/docker/build-test.sh` can be used to create an image and upload it to a repository of your choosing. + +``` +Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s] + -s skip build, just push the image +Builds an amd64 image and pushes it to the docker hub repository REPOSITORY +``` + +This is especially useful when testing a Pull Request for Kubernetes, since you can set `image` to an immutable repository and tag, set the `imagePullPolicy` to `Always` and just keep uploading new images. + +Example: + +We get a local copy of the Helm chart at https://github.com/netdata/helmchart. We modify `values.yaml` to have the following: + +``` +image: + repository: cakrit/netdata-prs + tag: PR5576 + pullPolicy: Always +``` + +We check out PR5576 and run the following: +``` +./packaging/docker/build-test.sh -r cakrit/netdata-prs -v PR5576 -u cakrit -p 'XXX' +``` + +Then we can run `helm install [path to our helmchart clone]`. + +If we make changes to the code, we execute the same `build-test.sh` command, followed by `helm upgrade [name] [path to our helmchart clone]` + +### Inside netdata organization, using Travis CI + +To enable Travis CI integration on your own repositories (Docker and Github), you need to be part of the Netdata organization. +Once you have contacted the netdata owners to setup you up on Github and Travis, execute the following steps + +- Preparation + - Have netdata forked on your personal GITHUB account + - Get a GITHUB token: Go to Github settings -> Developer Settings -> Personal access tokens, generate a new token with full access to repo_hook, read only access to admin:org, public_repo, repo_deployment, repo:status and user:email settings enabled. This will be your GITHUB_TOKEN that is described later in the instructions, so keep it somewhere safe until is needed. + - Contact netdata team and seek for permissions on https://scan.coverity.com should you require Travis to be able to push your forked code to coverity for analysis and report. Once you are setup, you should have your email you used in coverity and a token from them. These will be your COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN that we will refer to later. 
+ - Have a valid Docker hub account, the credentials from this account will be your DOCKER_USERNAME and DOCKER_PWD mentioned later + +- Setting up Travis CI for your own fork (Detailed instructions provided by Travis team [here](https://docs.travis-ci.com/user/tutorial/)) + - Login to travis with your own GITHUB credentials (There is Open Auth access) + - Go to your profile settings, under [repositories](https://travis-ci.com/account/repositories) section and setup your netdata fork to be built by travis + - Once the repository has been setup, go to repository settings within travis (usually under https://travis-ci.com/NETDATA_DEVELOPER/netdata/settings, where "NETDATA_DEVELOPER" is your github handle) and select your desired settings. +- While in Travis settings, under netdata repository settings in the Environment Variables section, you need to add the following: + - DOCKER_USERNAME and DOCKER_PWD variables so that Travis can login to your docker hub account and publish docker images there. + - REPOSITORY variable to "NETDATA_DEVELOPER/netdata" where NETDATA_DEVELOPER is your github handle again. + - GITHUB_TOKEN variable with the token generated on the preparation step, for travis workflows to function properly + - COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN variables to enable Travis to submit your code for analysis to Coverity. + +Having followed these instructions, your forked repository should be all set up for Travis Integration, happy testing! diff --git a/packaging/docker/build-test.sh b/packaging/docker/build-test.sh new file mode 100755 index 000000000..a7e31d4f4 --- /dev/null +++ b/packaging/docker/build-test.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# Docker build wrapper, for testing manually the docker build process +# TODO: This script should consume build.sh after setting up required parameters +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author : Chris Akritidis (chris@netdata.cloud) +# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) + +printhelp() { + echo "Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s] + -s skip build, just push the image +Builds an amd64 image and pushes it to the docker hub repository REPOSITORY" +} + +set -e + +if [ ! -f .gitignore ]; then + echo "Run as ./packaging/docker/$(basename "$0") from top level directory of git repository" + exit 1 +fi + +DOBUILD=1 +while getopts :r:v:u:p:s option +do + case "$option" in + r) + REPOSITORY=$OPTARG + ;; + v) + VERSION=$OPTARG + ;; + u) + DOCKER_USERNAME=$OPTARG + ;; + p) + DOCKER_PWD=$OPTARG + ;; + s) + DOBUILD=0 + ;; + *) + printhelp + exit 1 + ;; + esac +done + +if [ -n "${REPOSITORY}" ] && [ -n "${VERSION}" ] && [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then + if [ $DOBUILD -eq 1 ] ; then + echo "Building ${VERSION} of ${REPOSITORY} container" + docker run --rm --privileged multiarch/qemu-user-static:register --reset + + # Build images using multi-arch Dockerfile. 
+ eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION}" --file packaging/docker/Dockerfile ./ + + # Create temporary docker CLI config with experimental features enabled (manifests v2 need it) + mkdir -p /tmp/docker + #echo '{"experimental":"enabled"}' > /tmp/docker/config.json + fi + + # Login to docker hub to allow futher operations + echo "Logging into docker" + echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin + + echo "Pushing ${REPOSITORY}:${VERSION}" + docker --config /tmp/docker push "${REPOSITORY}:${VERSION}" +else + echo "Missing parameter. REPOSITORY=${REPOSITORY} VERSION=${VERSION} DOCKER_USERNAME=${DOCKER_USERNAME} DOCKER_PWD=${DOCKER_PWD}" + printhelp + exit 1 +fi diff --git a/packaging/docker/build.sh b/packaging/docker/build.sh new file mode 100755 index 000000000..f15c7dad7 --- /dev/null +++ b/packaging/docker/build.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author : Pawel Krupa (paulfantom) +# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) + +set -e + +if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then + echo "This mechanism currently can only run on BASH version 4 and above" + exit 1 +fi + +VERSION="$1" +declare -A ARCH_MAP +ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64") +DEVEL_ARCHS=(amd64) +ARCHS="${!ARCH_MAP[@]}" + +if [ -z ${REPOSITORY} ]; then + REPOSITORY="${TRAVIS_REPO_SLUG}" + if [ -z ${REPOSITORY} ]; then + echo "REPOSITORY not set, build cannot proceed" + exit 1 + else + echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}" + fi +fi + +# When development mode is set, build on DEVEL_ARCHS +if [ ! -z ${DEVEL+x} ]; then + declare -a ARCHS=(${DEVEL_ARCHS[@]}) +fi + +# Ensure there is a version, the most appropriate one +if [ "${VERSION}" == "" ]; then + VERSION=$(git tag --points-at) + if [ "${VERSION}" == "" ]; then + VERSION="latest" + fi +fi + +# If we are not in netdata git repo, at the top level directory, fail +TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") +CWD=$(git rev-parse --show-cdup) +if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then + echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository" + echo "Docker build process aborted" + exit 1 +fi + +echo "Docker image build in progress.." +echo "Version : ${VERSION}" +echo "Repository : ${REPOSITORY}" +echo "Architectures : ${ARCHS[*]}" + +docker run --rm --privileged multiarch/qemu-user-static:register --reset + +# Build images using multi-arch Dockerfile. +for ARCH in ${ARCHS[@]}; do + TAG="${REPOSITORY}:${VERSION}-${ARCH}" + echo "Building tag ${TAG}.." + eval docker build --no-cache \ + --build-arg ARCH="${ARCH}" \ + --tag "${TAG}" \ + --file packaging/docker/Dockerfile ./ + echo "..Done!" +done + +echo "Docker build process completed!" diff --git a/packaging/docker/check_login.sh b/packaging/docker/check_login.sh new file mode 100755 index 000000000..7cc8d4e50 --- /dev/null +++ b/packaging/docker/check_login.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# +# This is a credential checker script, to help get early input on docker credentials status +# If these are wrong, then build/publish has no point running +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author : Pavlos Emm. 
Katsoulakis (paul@netdata.cloud) + +set -e + +if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then + echo "This mechanism currently can only run on BASH version 4 and above" + exit 1 +fi + +DOCKER_CMD="docker " + +# There is no reason to continue if we cannot log in to docker hub +if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then + echo "No docker hub username or password found, aborting without publishing" + exit 1 +fi + +# If we are not in netdata git repo, at the top level directory, fail +TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") +CWD=$(git rev-parse --show-cdup) +if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then + echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository" + echo "Docker build process aborted" + exit 1 +fi + +# Login to docker hub to allow futher operations +echo "Attempting to login to docker" +echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin + +echo "Docker login successful!" +$DOCKER_CMD logout + +echo "Docker login validation completed" diff --git a/packaging/docker/publish.sh b/packaging/docker/publish.sh new file mode 100755 index 000000000..948787b0b --- /dev/null +++ b/packaging/docker/publish.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +# +# Cross-arch docker publish helper script +# Needs docker in version >18.02 due to usage of manifests +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) + +set -e + +if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then + echo "This mechanism currently can only run on BASH version 4 and above" + exit 1 +fi + +WORKDIR="$(mktemp -d)" # Temporary folder, removed after script is done +VERSION="$1" +declare -A ARCH_MAP +ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64") +DEVEL_ARCHS=(amd64) +ARCHS="${!ARCH_MAP[@]}" +DOCKER_CMD="docker --config ${WORKDIR}" + +if [ -z ${REPOSITORY} ]; then + REPOSITORY="${TRAVIS_REPO_SLUG}" + if [ -z ${REPOSITORY} ]; then + echo "REPOSITORY not set, publish cannot proceed" + exit 1 + else + echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}" + fi +fi + +# When development mode is set, build on DEVEL_ARCHS +if [ ! -z ${DEVEL+x} ]; then + declare -a ARCHS=(${DEVEL_ARCHS[@]}) +fi + +# Ensure there is a version, the most appropriate one +if [ "${VERSION}" == "" ]; then + VERSION=$(git tag --points-at) + if [ "${VERSION}" == "" ]; then + VERSION="latest" + fi +fi +MANIFEST_LIST="${REPOSITORY}:${VERSION}" + +# There is no reason to continue if we cannot log in to docker hub +if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then + echo "No docker hub username or password found, aborting without publishing" + exit 1 +fi + +# If we are not in netdata git repo, at the top level directory, fail +TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") +CWD=$(git rev-parse --show-cdup) +if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then + echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository" + echo "Docker build process aborted" + exit 1 +fi + +echo "Docker image publishing in progress.." 
+echo "Version : ${VERSION}" +echo "Repository : ${REPOSITORY}" +echo "Architectures : ${ARCHS[*]}" +echo "Manifest list : ${MANIFEST_LIST}" + +# Create temporary docker CLI config with experimental features enabled (manifests v2 need it) +echo '{"experimental":"enabled"}' > "${WORKDIR}"/config.json + +# Login to docker hub to allow futher operations +echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin + +# Push images to registry +for ARCH in ${ARCHS[@]}; do + TAG="${MANIFEST_LIST}-${ARCH}" + echo "Publishing image ${TAG}.." + $DOCKER_CMD push "${TAG}" & + echo "Image ${TAG} published succesfully!" +done + +echo "Waiting for images publishing to complete" +wait + +# Recreate docker manifest list +echo "Creating manifest list.." +$DOCKER_CMD manifest create --amend "${MANIFEST_LIST}" \ + "${MANIFEST_LIST}-i386" \ + "${MANIFEST_LIST}-armhf" \ + "${MANIFEST_LIST}-aarch64" \ + "${MANIFEST_LIST}-amd64" + +# Annotate manifest with CPU architecture information + +echo "Executing manifest annotate.." +for ARCH in ${ARCHS[@]}; do + TAG="${MANIFEST_LIST}-${ARCH}" + echo "Annotating manifest for $ARCH, with TAG: ${TAG} (Manifest list: ${MANIFEST_LIST})" + $DOCKER_CMD manifest annotate "${MANIFEST_LIST}" "${TAG}" --os linux --arch "${ARCH_MAP[$ARCH]}" +done + +# Push manifest to docker hub +echo "Pushing manifest list to docker.." +$DOCKER_CMD manifest push -p "${MANIFEST_LIST}" + +# Show current manifest (debugging purpose only) +echo "Evaluating manifest list entry" +$DOCKER_CMD manifest inspect "${MANIFEST_LIST}" + +# Cleanup +rm -r "${WORKDIR}" + +echo "Docker publishing process completed!" diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh new file mode 100644 index 000000000..243cae8a2 --- /dev/null +++ b/packaging/docker/run.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +#set -e + +if [ ${RESCRAMBLE+x} ]; then + echo "Reinstalling all packages to get the latest Polymorphic Linux scramble" + apk upgrade --update-cache --available +fi + +if [ ${PGID+x} ]; then + echo "Adding user netdata to group with id ${PGID}" + addgroup -g "${PGID}" -S hostgroup 2>/dev/null + sed -i "s/${PGID}:$/${PGID}:netdata/g" /etc/group +fi + +exec /usr/sbin/netdata -u netdata -D -s /host -p "${NETDATA_PORT}" "$@" diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums index 50de0be21..ae57b3c78 100644 --- a/packaging/go.d.checksums +++ b/packaging/go.d.checksums @@ -1,16 +1,16 @@ -dc9bd3ed7af8725a124cca5dd88a371affd40f92450b256a01971986941fb7b8 *config.tar.gz -ee638c0b4fd0d93f58ff8e6e9b075bcec32b01535247ebdc76c4736ff6b01a3a *go.d.plugin-v0.4.0.darwin-386 -815e74726cbf193c2e17338e2bfb0fdae3caf86af70fd3d755fdc785a67077fe *go.d.plugin-v0.4.0.darwin-amd64 -1bc890382b62b8f16a0e8a9183bcdbd538322ba44f6e567d5e3a65043db43889 *go.d.plugin-v0.4.0.freebsd-386 -e38aa0a38ec30b986a97e4c4c8f7cef505d0301521dacd2ad16a9d8e3dab5c89 *go.d.plugin-v0.4.0.freebsd-amd64 -cfee331b57dfb8defbd1533de828210b9a7b51a79eef7724dd5fe7a0560f8b57 *go.d.plugin-v0.4.0.freebsd-arm -735e23add96a27b671c5e5b81814c01d3a20c9e6f9e9ec0347114b80368d8d3c *go.d.plugin-v0.4.0.linux-386 -3312d88005d04202df7c2ba2dfeccf02d18220af1a971ffc7fe5591d9d5f0bb7 *go.d.plugin-v0.4.0.linux-amd64 -afa80dd193f82491044e869599cc2984123a9550281d62ace1e3f29b9a163d88 *go.d.plugin-v0.4.0.linux-arm -b9b610ec955192cd899208f51a797236830cced58935f4fbb64b8b82fa88adfe *go.d.plugin-v0.4.0.linux-arm64 -71d0cde540c20f000ec606accd1e987b215b25cb63bc00fa5a6cc9bab95c6183 *go.d.plugin-v0.4.0.linux-mips -702bf333b6903669896f0eb1f19e71159516fe7452fcec4ab3eb669a3e3dc975 
*go.d.plugin-v0.4.0.linux-mips64 -f24d39afd1fbbc818afc99d21ab33584c337c3d0d0014ec4132e7f020d04d5e6 *go.d.plugin-v0.4.0.linux-mips64le -1584e1124ba32fc6fa6112da8dc422e8a6259520bd5aac7f35590b4065c66ec0 *go.d.plugin-v0.4.0.linux-mipsle -eb05509a47f10fe2f2a98a3dbd89ba76de6b807d2bbd1732b096ad1756d0b695 *go.d.plugin-v0.4.0.linux-ppc64 -048fd2ba81e44e7b979a8175b711324c31ecd22cb7746bac3c05b88d8571122a *go.d.plugin-v0.4.0.linux-ppc64le +f851c86df8248e52602e39c3198c9b0d858a70c24c5e5c3fb63d691ede5ae9c6 *config.tar.gz +a27dddfc9a783980375aa1f5c54dcfbaf38044311bd16e0371cffd94a2ebe46e *go.d.plugin-v0.5.0.darwin-386 +1d4815d92860089728944f6b893fea16dc51dd6e47a81e5a7599abfdc73ff2de *go.d.plugin-v0.5.0.darwin-amd64 +a3c76f4b806bf930d344a83b0dc2b3fabe16f747ba89b96eac7fcbdb88c4b058 *go.d.plugin-v0.5.0.freebsd-386 +673f61317b8e6f2b226f30d106cff3532d8a3ee3453997d11f984d76c55831ce *go.d.plugin-v0.5.0.freebsd-amd64 +a352b24578d497b505031b8a84e541532d8f4f2543e3ea454b674dece426982c *go.d.plugin-v0.5.0.freebsd-arm +0a3a4249dd94c2cd4bc0f9ac3e49d5f19ff3a52d91fc4540a17688a4c1b71ce8 *go.d.plugin-v0.5.0.linux-386 +40e034ec19952467b85aebda3c57b823c9e75d799318669c4a811b4296382396 *go.d.plugin-v0.5.0.linux-amd64 +74b955b838939a73455403181cf4be67c8f5d0d313f3da0504a6b47605b22ae0 *go.d.plugin-v0.5.0.linux-arm +8d564d5bc689fdf46b63fa9b4d152f8ce84bfad102d358f1d3acd390aebf1c2d *go.d.plugin-v0.5.0.linux-arm64 +dd2c9c4e842248f8d7d0588057507e4b683cc9ebef406886c3a839afbcbdee3f *go.d.plugin-v0.5.0.linux-mips +046e315f82b0dd9fa792a0cd07d25564e768d7d44c7c388f3f432e0d3a98da50 *go.d.plugin-v0.5.0.linux-mips64 +6a05c782d5b8200a51eb5334b9c0750a6d511d442078614729592582ab40da05 *go.d.plugin-v0.5.0.linux-mips64le +0f5427fb451aa34cdc71b2c3d0a2d638f63e8bc60f7cffdf62258fc88048d39e *go.d.plugin-v0.5.0.linux-mipsle +a5d21ed9c9858d9fe24ade24825e5449151e5dd114f9715c26d6c03ad6d70919 *go.d.plugin-v0.5.0.linux-ppc64 +c7ec8b4ae2b94f7689f4a6722a5fac7a8302574e9a906e4b76af70bff624557c *go.d.plugin-v0.5.0.linux-ppc64le diff --git a/packaging/installer/README.md b/packaging/installer/README.md index 1fa8f14d5..6dc084e83 100644 --- a/packaging/installer/README.md +++ b/packaging/installer/README.md @@ -42,7 +42,7 @@ bash <(curl -Ss https://my-netdata.io/kickstart.sh) Verify the integrity of the script with this: ```bash -[ "15c688e7228ebee83ace1811273cd089" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" +[ "fe451cd039c8f99b2ba4ca0feab88033" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` *It should print `OK, VALID` if the script is the one we ship.* @@ -99,7 +99,7 @@ To install Netdata with a binary package on any Linux distro, any kernel version Verify the integrity of the script with this: ```bash -[ "97427a0fc5a52593b603c2ae887d4466" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" +[ "9ff4f5f37d23dff431f80d5349e0a25c" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` *It should print `OK, VALID` if the script is the one we ship.* @@ -191,13 +191,13 @@ This is how to do it by hand: ```sh # Debian / Ubuntu -apt-get install zlib1g-dev uuid-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl +apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libmnl-dev gcc make 
git autoconf autoconf-archive autogen automake pkg-config curl # Fedora -dnf install zlib-devel libuuid-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils +dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils # CentOS / Red Hat Enterprise Linux -yum install autoconf automake curl gcc git libmnl-devel libuuid-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel +yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel ``` @@ -234,6 +234,17 @@ package|description *Netdata will greatly benefit if you have the above packages installed, but it will still work without them.* +Netdata DB engine can be enabled when these are installed (they are optional): + +|package|description| +|:-----:|-----------| +|`libuv`|multi-platform support library with a focus on asynchronous I/O| +|`liblz4`|Extremely Fast Compression algorithm| +|`Judy`|General purpose dynamic array| +|`openssl`|Cryptography and SSL/TLS Toolkit| + +*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.* + --- ### Install Netdata @@ -299,16 +310,19 @@ Note first three packages are downloaded from the pfSense repository for maintai pkg install pkgconf pkg install bash pkg install e2fsprogs-libuuid -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.11.0.txz +pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/python36-3.6.8_2.txz +pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.13.0.txz ``` To start Netdata manually run `service netdata onestart` -To start Netdata automatically at each boot add `service netdata start` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**). +To start Netdata automatically at each boot add `service netdata onestart` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**). Shellcmd Type should be set to `Shellcmd`. -![](https://user-images.githubusercontent.com/36808164/36930790-4db3aa84-1f0d-11e8-8752-cdc08bb7207c.png) +![](https://i.imgur.com/wcKiPe1.png) Alternatively more information can be found in https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages, for achieving the same via the command line and scripts. -If you experience an issue with `/usr/bin/install` absense on pfSense 2.3 or earlier, update pfSense or use workaround from [https://redmine.pfsense.org/issues/6643](https://redmine.pfsense.org/issues/6643) +If you experience an issue with `/usr/bin/install` absense on pfSense 2.3 or earlier, update pfSense or use workaround from [https://redmine.pfsense.org/issues/6643](https://redmine.pfsense.org/issues/6643) + +**Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata` ##### FreeNAS On FreeNAS-Corral-RELEASE (>=10.0.3), Netdata is pre-installed. 
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md index c93ebf9b4..0903ddb57 100644 --- a/packaging/installer/UPDATE.md +++ b/packaging/installer/UPDATE.md @@ -11,7 +11,7 @@ The update procedure depends on how you installed it: ### Manual update to get the latest git commit -netdata versions older than `v1.12.0-rc2-52` had a `netdata-updater.sh` script in the root directory of the source code, which has now been deprecated. The manual process that works for all versions to get the latest commit in git is to use the `netdata-installer.sh`. The installer preserves your custom configuration and updates the the information of the installation in the `.environment` file under the user configuration directory. +netdata versions older than `v1.12.0-rc2-52` had a `netdata-updater.sh` script in the root directory of the source code, which has now been deprecated. The manual process that works for all versions to get the latest commit in git is to use the `netdata-installer.sh`. The installer preserves your custom configuration and updates the information of the installation in the `.environment` file under the user configuration directory. ```sh # go to the git downloaded directory diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh index 4270c4ee0..d1e944878 100644 --- a/packaging/installer/functions.sh +++ b/packaging/installer/functions.sh @@ -606,6 +606,11 @@ portable_add_user() { run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0 fi + # mac OS + if command -v sysadminctl 1> /dev/null 2>&1; then + run sysadminctl -addUser ${username} && return 0 + fi + echo >&2 "Failed to add ${username} user account !" return 1 @@ -637,6 +642,11 @@ portable_add_group() { run addgroup "${groupname}" && return 0 fi + # mac OS + if command -v dseditgroup 1> /dev/null 2>&1; then + dseditgroup -o create "${groupname}" && return 0 + fi + echo >&2 "Failed to add ${groupname} user group !" return 1 } @@ -674,6 +684,10 @@ portable_add_user_to_group() { run addgroup "${username}" "${groupname}" && return 0 fi + # mac OS + if command -v dseditgroup 1> /dev/null 2>&1; then + dseditgroup -u "${username}" "${groupname}" && return 0 + fi echo >&2 "Failed to add user ${username} to group ${groupname} !" return 1 fi diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh index 6ef3a1232..505179051 100755 --- a/packaging/installer/kickstart-static64.sh +++ b/packaging/installer/kickstart-static64.sh @@ -32,13 +32,27 @@ setup_terminal() { return 0 } +setup_terminal || echo >/dev/null -progress() { - echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +# ----------------------------------------------------------------------------- +fatal() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" + exit 1 +} + +run_ok() { + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" } +run_failed() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" +} + +ESCAPED_PRINT_METHOD= +printf "%q " test >/dev/null 2>&1 +[ $? 
-eq 0 ] && ESCAPED_PRINT_METHOD="printfq" escaped_print() { - if printf "%q " test >/dev/null 2>&1; then + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then printf "%q " "${@}" else printf "%s" "${*}" @@ -46,24 +60,39 @@ escaped_print() { return 0 } +progress() { + echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +run_logfile="/dev/null" run() { - local dir="${PWD}" info_console + local user="${USER--}" dir="${PWD}" info info_console if [ "${UID}" = "0" ]; then + info="[root ${dir}]# " info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " else + info="[${user} ${dir}]$ " info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " fi - escaped_print "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" "${@}" "${TPUT_RESET}\n" >&2 + printf >>"${run_logfile}" "${info}" + escaped_print >>"${run_logfile}" "${@}" + printf >>"${run_logfile}" " ... " + + printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" + escaped_print >&2 "${@}" + printf >&2 "${TPUT_RESET}\n" "${@}" local ret=$? if [ ${ret} -ne 0 ]; then - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" + run_failed + printf >>"${run_logfile}" "FAILED with exit code ${ret}\n" else - printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" + run_ok + printf >>"${run_logfile}" "OK\n" fi return ${ret} @@ -130,8 +159,6 @@ sudo="" [ -z "${UID}" ] && UID="$(id -u)" [ "${UID}" -ne "0" ] && sudo="sudo" -setup_terminal || echo >/dev/null - # --------------------------------------------------------------------------------------------------------------------- if [ "$(uname -m)" != "x86_64" ]; then fatal "Static binary versions of netdata are available only for 64bit Intel/AMD CPUs (x86_64), but yours is: $(uname -m)." diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh index a8057c19c..2db95f21d 100755 --- a/packaging/installer/kickstart.sh +++ b/packaging/installer/kickstart.sh @@ -58,13 +58,27 @@ setup_terminal() { return 0 } +setup_terminal || echo >/dev/null -progress() { - echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +# ----------------------------------------------------------------------------- +fatal() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" + exit 1 +} + +run_ok() { + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" } +run_failed() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" +} + +ESCAPED_PRINT_METHOD= +printf "%q " test >/dev/null 2>&1 +[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq" escaped_print() { - if printf "%q " test >/dev/null 2>&1; then + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then printf "%q " "${@}" else printf "%s" "${*}" @@ -72,24 +86,39 @@ escaped_print() { return 0 } +progress() { + echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +run_logfile="/dev/null" run() { - local dir="${PWD}" info_console + local user="${USER--}" dir="${PWD}" info info_console if [ "${UID}" = "0" ]; then + info="[root ${dir}]# " info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " else + info="[${user} ${dir}]$ " info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " fi - escaped_print "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" "${@}" "${TPUT_RESET}\n" >&2 + printf >>"${run_logfile}" "${info}" + escaped_print >>"${run_logfile}" "${@}" + printf >>"${run_logfile}" " ... " + + printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" + escaped_print >&2 "${@}" + printf >&2 "${TPUT_RESET}\n" - ${@} + "${@}" local ret=$? 
if [ ${ret} -ne 0 ]; then - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" + run_failed + printf >>"${run_logfile}" "FAILED with exit code ${ret}\n" else - printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" + run_ok + printf >>"${run_logfile}" "OK\n" fi return ${ret} @@ -224,8 +253,6 @@ sudo="" [ "${UID}" -ne "0" ] && sudo="sudo" export PATH="${PATH}:/usr/local/bin:/usr/local/sbin" -setup_terminal || echo >/dev/null - # --------------------------------------------------------------------------------------------------------------------- # try to update using autoupdater in the first place diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh index 96dd62906..cfd858c02 100755 --- a/packaging/installer/netdata-uninstaller.sh +++ b/packaging/installer/netdata-uninstaller.sh @@ -1,11 +1,15 @@ #!/usr/bin/env bash #shellcheck disable=SC2181 - -# this script will uninstall netdata - +# +# This is the netdata uninstaller script # Variables needed by script and taken from '.environment' file: # - NETDATA_PREFIX # - NETDATA_ADDED_TO_GROUPS +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author: Paul Emm. Katsoulakis <paul@netdata.cloud> +# usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything @@ -46,13 +50,13 @@ while :; do done if [ "$YES" != "1" ]; then - echo "This script will REMOVE netdata from your system." - echo "Run it again with --yes to do it." + echo >&2 "This script will REMOVE netdata from your system." + echo >&2 "Run it again with --yes to do it." exit 1 fi if [[ $EUID -ne 0 ]]; then - echo "This script SHOULD be run as root or otherwise it won't delete all installed components." + echo >&2 "This script SHOULD be run as root or otherwise it won't delete all installed components." key="n" read -r -s -n 1 -p "Do you want to continue as non-root user [y/n] ? " key if [ "$key" != "y" ] && [ "$key" != "Y" ]; then @@ -60,34 +64,191 @@ if [[ $EUID -ne 0 ]]; then fi fi -function quit_msg() { +# ----------------------------------------------------------------------------- + +setup_terminal() { + TPUT_RESET="" + TPUT_YELLOW="" + TPUT_WHITE="" + TPUT_BGRED="" + TPUT_BGGREEN="" + TPUT_BOLD="" + TPUT_DIM="" + + # Is stderr on the terminal? If not, then fail + test -t 2 || return 1 + + if command -v tput 1>/dev/null 2>&1; then + if [ $(($(tput colors 2>/dev/null))) -ge 8 ]; then + # Enable colors + TPUT_RESET="$(tput sgr 0)" + TPUT_YELLOW="$(tput setaf 3)" + TPUT_WHITE="$(tput setaf 7)" + TPUT_BGRED="$(tput setab 1)" + TPUT_BGGREEN="$(tput setab 2)" + TPUT_BOLD="$(tput bold)" + TPUT_DIM="$(tput dim)" + fi + fi + + return 0 +} +setup_terminal || echo >/dev/null + +run_ok() { + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" +} + +run_failed() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" +} + +ESCAPED_PRINT_METHOD= +printf "%q " test >/dev/null 2>&1 +[ $? 
-eq 0 ] && ESCAPED_PRINT_METHOD="printfq" +escaped_print() { + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then + printf "%q " "${@}" + else + printf "%s" "${*}" + fi + return 0 +} + +run_logfile="/dev/null" +run() { + local user="${USER--}" dir="${PWD}" info info_console + + if [ "${UID}" = "0" ]; then + info="[root ${dir}]# " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " + else + info="[${user} ${dir}]$ " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " + fi + + printf >>"${run_logfile}" "${info}" + escaped_print >>"${run_logfile}" "${@}" + printf >>"${run_logfile}" " ... " + + printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" + escaped_print >&2 "${@}" + printf >&2 "${TPUT_RESET}\n" + + "${@}" + + local ret=$? + if [ ${ret} -ne 0 ]; then + run_failed + printf >>"${run_logfile}" "FAILED with exit code ${ret}\n" + else + run_ok + printf >>"${run_logfile}" "OK\n" + fi + + return ${ret} +} + +portable_del_group() { + local groupname="${1}" + + # Check if group exist + echo >&2 "Removing ${groupname} user group ..." + + # Linux + if command -v groupdel 1>/dev/null 2>&1; then + run groupdel -f "${groupname}" && return 0 + fi + + # mac OS + if command -v dseditgroup 1> /dev/null 2>&1; then + if dseditgroup -o read netdata 1> /dev/null 2>&1; then + run dseditgroup -o delete "${groupname}" && return 0 + else + echo >&2 "Could not find group ${groupname}, nothing to do" + fi + fi + + echo >&2 "Group ${groupname} was not automatically removed, you might have to remove it manually" + return 1 +} + +portable_del_user() { + local username="${1}" + echo >&2 "Deleting ${username} user account ..." + + # Linux + if command -v userdel 1>/dev/null 2>&1; then + run userdel -f "${username}" && return 0 + fi + + # mac OS + if command -v sysadminctl 1>/dev/null 2>&1; then + run sysadminctl -deleteUser "${username}" && return 0 + fi + + echo >&2 "User ${username} could not be deleted from system, you might have to remove it manually" + return 1 +} + +portable_del_user_from_group() { + local groupname="${1}" username="${2}" + + # username is not in group + echo >&2 "Deleting ${username} user from ${groupname} group ..." + + # Linux + if command -v gpasswd 1>/dev/null 2>&1; then + run gpasswd -d "netdata" "${group}" && return 0 + fi + + # FreeBSD + if command -v pw 1>/dev/null 2>&1; then + run pw groupmod "${groupname}" -d "${username}" && return 0 + fi + + # BusyBox + if command -v delgroup 1>/dev/null 2>&1; then + run delgroup "${username}" "${groupname}" && return 0 + fi + + # mac OS + if command -v dseditgroup 1> /dev/null 2>&1; then + run dseditgroup -o delete -u "${username}" "${groupname}" && return 0 + fi + + echo >&2 "Failed to delete user ${username} from group ${groupname} !" 
+ return 1 +} + +quit_msg() { echo if [ "$FILE_REMOVAL_STATUS" -eq 0 ]; then - echo "Something went wrong :(" + echo >&2 "Something went wrong :(" else - echo "Netdata files were successfully removed from your system" + echo >&2 "Netdata files were successfully removed from your system" fi } -function user_input() { +user_input() { TEXT="$1" if [ "${INTERACTIVITY}" == "-i" ]; then read -r -p "$TEXT" >&2 fi } -function rm_file() { +rm_file() { FILE="$1" if [ -f "${FILE}" ]; then - rm -v ${INTERACTIVITY} "${FILE}" + run rm -v ${INTERACTIVITY} "${FILE}" fi } -function rm_dir() { +rm_dir() { DIR="$1" if [ -n "$DIR" ] && [ -d "$DIR" ]; then user_input "Press ENTER to recursively delete directory '$DIR' > " - rm -v -f -R "${DIR}" + run rm -v -f -R "${DIR}" fi } @@ -116,14 +277,14 @@ trap quit_msg EXIT source "${ENVIRONMENT_FILE}" || exit 1 #### STOP NETDATA -echo "Stopping a possibly running netdata..." +echo >&2 "Stopping a possibly running netdata..." for p in $(netdata_pids); do i=0 while kill "${p}" 2>/dev/null; do if [ "$i" -gt 30 ]; then - echo "Forcefully stopping netdata with pid ${p}" - kill -9 "${p}" - sleep 2 + echo >&2 "Forcefully stopping netdata with pid ${p}" + run kill -9 "${p}" + run sleep 2 break fi sleep 1 @@ -155,15 +316,18 @@ fi FILE_REMOVAL_STATUS=1 -#### REMOVE NETDATA USER & GROUP +#### REMOVE NETDATA USER FROM ADDED GROUPS if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then user_input "Press ENTER to delete 'netdata' from following groups: '$NETDATA_ADDED_TO_GROUPS' > " for group in $NETDATA_ADDED_TO_GROUPS; do - gpasswd -d netdata "${group}" + portable_del_user_from_group "${group}" "netdata" done fi +#### REMOVE USER user_input "Press ENTER to delete 'netdata' system user > " -userdel -f netdata || : +portable_del_user "netdata" || : + +### REMOVE GROUP user_input "Press ENTER to delete 'netdata' system group > " -groupdel -f netdata || : +portable_del_group "netdata" || : diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh index 56c837953..21a769ba5 100644..100755 --- a/packaging/installer/netdata-updater.sh +++ b/packaging/installer/netdata-updater.sh @@ -67,6 +67,12 @@ download() { } set_tarball_urls() { + + if [ ! -z "${NETDATA_LOCAL_TARBAL_OVERRIDE}" ]; then + info "Not fetching remote tarballs, local override was given" + return + fi + if [ "$1" == "stable" ]; then local latest # Simple version @@ -83,22 +89,34 @@ set_tarball_urls() { update() { [ -z "${logfile}" ] && info "Running on a terminal - (this script also supports running headless from crontab)" - dir=$(create_tmp_directory) - cd "$dir" + RUN_INSTALLER=0 + tmpdir=$(create_tmp_directory) + cd "$tmpdir" - download "${NETDATA_TARBALL_CHECKSUM_URL}" "${dir}/sha256sum.txt" >&3 2>&3 - if grep "${NETDATA_TARBALL_CHECKSUM}" sha256sum.txt >&3 2>&3; then - info "Newest version is already installed" - else - download "${NETDATA_TARBALL_URL}" "${dir}/netdata-latest.tar.gz" - if ! grep netdata-latest.tar.gz sha256sum.txt | safe_sha256sum -c - >&3 2>&3; then - fatal "Tarball checksum validation failed. Stopping netdata upgrade and leaving tarball in ${dir}" + if [ -z "${NETDATA_LOCAL_TARBAL_OVERRIDE}" ]; then + download "${NETDATA_TARBALL_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt" >&3 2>&3 + if grep "${NETDATA_TARBALL_CHECKSUM}" sha256sum.txt >&3 2>&3; then + info "Newest version is already installed" + else + download "${NETDATA_TARBALL_URL}" "${tmpdir}/netdata-latest.tar.gz" + if ! 
grep netdata-latest.tar.gz sha256sum.txt | safe_sha256sum -c - >&3 2>&3; then + fatal "Tarball checksum validation failed. Stopping netdata upgrade and leaving tarball in ${tmpdir}" + fi + NEW_CHECKSUM="$(safe_sha256sum netdata-latest.tar.gz 2>/dev/null| cut -d' ' -f1)" + tar -xf netdata-latest.tar.gz >&3 2>&3 + rm netdata-latest.tar.gz >&3 2>&3 + cd netdata-* + RUN_INSTALLER=1 fi - NEW_CHECKSUM="$(safe_sha256sum netdata-latest.tar.gz 2>/dev/null| cut -d' ' -f1)" - tar -xf netdata-latest.tar.gz >&3 2>&3 - rm netdata-latest.tar.gz >&3 2>&3 - cd netdata-* + else + info "!!Local tarball override detected!! - Entering directory ${NETDATA_LOCAL_TARBAL_OVERRIDE} for installation, not downloading anything" + RUN_INSTALLER=1 + cd ${NETDATA_LOCAL_TARBAL_OVERRIDE} + fi + + # We got the sources, run the update now + if [ ${RUN_INSTALLER} -eq 1 ]; then # signal netdata to start saving its database # this is handy if your database is big pids=$(pidof netdata) @@ -119,9 +137,9 @@ NETDATA_TARBALL_CHECKSUM="$NEW_CHECKSUM" EOF fi - rm -rf "${dir}" >&3 2>&3 + rm -rf "${tmpdir}" >&3 2>&3 [ -n "${logfile}" ] && rm "${logfile}" && logfile= - return 0 + return } # Usually stored in /etc/netdata/.environment diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md new file mode 100644 index 000000000..9fb36e771 --- /dev/null +++ b/packaging/maintainers/README.md @@ -0,0 +1,75 @@ +# Package Maintainers + +This page tracks the package maintainers for netdata, for various operating systems and versions. + +> Feel free to update it, so that it reflects the current status. + + +--- + +## Official Linux Distributions + +| Linux Distribution | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) | +| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) | +| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) | +| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) | +| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) | +| Ubuntu | | | | +| Red Hat / Fedora / Centos | | | | +| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) | + +--- +## FreeBSD + +| System | Initial PR | Core Developer | Package Maintainer +|:-:|:-:|:-:|:-:| +FreeBSD|#1321|@vlvkobal|@mmokhi + +--- +## MacOS + +| System | URL | Core Developer | Package Maintainer +|:-:|:-:|:-:|:-:| +MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen + +--- +## Unofficial Linux Packages + +| Linux Distribution | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 | + +--- +## Embedded Linux + +| Embedded Linux | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| ASUSTOR NAS | ? 
| William Lin | https://www.asustor.com/apps/app_detail?id=532 | +| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) | +| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata | +| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 | +| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi | + +--- +## Linux Containers + +| Containers | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Docker | Git | @titpetric | https://github.com/titpetric/netdata | + +--- +## Automation Systems + +| Automation Systems | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ | +| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook | + +--- +## Packages summary from repology.org + +[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fmaintainers%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md new file mode 100644 index 000000000..eb4c380b1 --- /dev/null +++ b/packaging/makeself/README.md @@ -0,0 +1,48 @@ +# netdata static binary build + +To build the static binary 64-bit distribution package, run: + +```bash +$ cd /path/to/netdata.git +$ ./packaging/makeself/build-x86_64-static.sh +``` + +The program will: + +1. setup a new docker container with Alpine Linux +2. install the required alpine packages (the build environment, needed libraries, etc) +3. download and compile third party apps that are packaged with netdata (`bash`, `curl`, etc) +4. compile netdata + +Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the netdata binary package that can be run to install netdata on any other computer. + +--- + +## building binaries with debug info + +To build netdata binaries with debugging / tracing information in them, use: + +```bash +$ cd /path/to/netdata.git +$ ./packaging/makeself/build-x86_64-static.sh debug +``` + +These binaries are not optimized (they are a bit slower), they have certain features disables (like log flood protection), other features enables (like `debug flags`) and are not stripped (the binary files are bigger, since they now include source code tracing information). + +#### debugging netdata binaries + +Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start netdata: + +```bash +PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D +``` + +The above command, will run netdata under `valgrind`. While netdata runs under `valgrind` it will be 10x slower and use a lot more memory. + +If netdata crashes, `valgrind` will print a stack trace of the issue. Open a github issue to let us know. + +To stop netdata while it runs under `valgrind`, press Control-C on the console. + +> If you omit the parameter `--undef-value-errors=no` to valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal. 
Valgrind has heuristics to prevent it from printing such errors for system libraries, but for the static netdata binary, all the required libraries are built into netdata. So, valgrind cannot appply its heuristics and prints them. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fmakeself%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh new file mode 100755 index 000000000..69ddf2bf5 --- /dev/null +++ b/packaging/makeself/build-x86_64-static.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "$0")/../installer/functions.sh || exit 1 + +set -e + +DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine37" + +if ! sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 +then + # To run interactively: + # sudo docker run -it netdata-package-x86_64-static /bin/sh + # (add -v host-dir:guest-dir:rw arguments to mount volumes) + # + # To remove images in order to re-create: + # sudo docker rm -v $(sudo docker ps -a -q -f status=exited) + # sudo docker rmi netdata-package-x86_64-static + # + # This command maps the current directory to + # /usr/src/netdata.git + # inside the container and runs the script install-alpine-packages.sh + # (also inside the container) + # + run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.7 \ + /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh + + # save the changes made permanently + id=$(sudo docker ps -l -q) + run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}" +fi + +# Run the build script inside the container +run sudo docker run -a stdin -a stdout -a stderr -i -t -v \ + $(pwd):/usr/src/netdata.git:rw \ + "${DOCKER_CONTAINER_NAME}" \ + /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" + +if [ "${USER}" ] + then + sudo chown -R "${USER}" . +fi diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh new file mode 100755 index 000000000..e5804c523 --- /dev/null +++ b/packaging/makeself/build.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later + +# ----------------------------------------------------------------------------- +# parse command line arguments + +export NETDATA_BUILD_WITH_DEBUG=0 + +while [ ! -z "${1}" ] +do + case "${1}" in + debug) + export NETDATA_BUILD_WITH_DEBUG=1 + ;; + + *) + ;; + esac + + shift +done + + +# ----------------------------------------------------------------------------- + +# First run install-alpine-packages.sh under alpine linux to install +# the required packages. build-x86_64-static.sh will do this for you +# using docker. + +cd $(dirname "$0") || exit 1 + +# if we don't run inside the netdata repo +# download it and run from it +if [ ! -f ../../netdata-installer.sh ] +then + git clone https://github.com/netdata/netdata.git netdata.git || exit 1 + cd netdata.git/makeself || exit 1 + ./build.sh "$@" + exit $? +fi + +cat >&2 <<EOF + +This program will create a self-extracting shell package containing +a statically linked netdata, able to run on any 64bit Linux system, +without any dependencies from the target system. + +It can be used to have netdata running in no-time, or in cases the +target Linux system cannot compile netdata. + +EOF + +# read -p "Press ENTER to continue > " + +if [ ! 
-d tmp ] + then + mkdir tmp || exit 1 +fi + +./run-all-jobs.sh "$@" +exit $? diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh new file mode 100755 index 000000000..a0b72223d --- /dev/null +++ b/packaging/makeself/functions.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# ----------------------------------------------------------------------------- + +# allow running the jobs by hand +[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0 +[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}" +[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.." +[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" +[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.." +export NULL= + +# make sure the path does not end with / +if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ] + then + export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}" +fi + +# find the parent directory +export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")" + +# ----------------------------------------------------------------------------- + +# bash strict mode +set -euo pipefail + +# ----------------------------------------------------------------------------- + +fetch() { + local dir="${1}" url="${2}" + local tar="${dir}.tar.gz" + + if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ] + then + run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}" + fi + + if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ] + then + cd "${NETDATA_MAKESELF_PATH}/tmp" + run tar -zxpf "${tar}" + cd - + fi + + run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}" +} + +# ----------------------------------------------------------------------------- + +# load the functions of the netdata-installer.sh +. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh" + +# ----------------------------------------------------------------------------- + +# debug +echo "ME=${0}" +echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}" +echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}" +echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}" +echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}" +echo "PROCESSORS=$(find_processors)" diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh new file mode 100755 index 000000000..bcb971f8f --- /dev/null +++ b/packaging/makeself/install-alpine-packages.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env sh +# +# Installation script for the alpine host +# to prepare the static binary +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author: Paul Emm. 
Katsoulakis <paul@netdata.cloud> + +# Packaging update +apk update + +# Add required APK packages +apk add --no-cache \ + bash \ + wget \ + curl \ + ncurses \ + git \ + netcat-openbsd \ + alpine-sdk \ + autoconf \ + automake \ + gcc \ + make \ + libtool \ + pkgconfig \ + util-linux-dev \ + openssl-dev \ + gnutls-dev \ + zlib-dev \ + libmnl-dev \ + libnetfilter_acct-dev \ + libuv-dev \ + lz4-dev \ + openssl-dev \ + || exit 1 + +# Judy doesnt seem to be available on the repositories, download manually and install it +export JUDY_VER="1.0.5" +wget -O /judy.tar.gz http://downloads.sourceforge.net/project/judy/judy/Judy-${JUDY_VER}/Judy-${JUDY_VER}.tar.gz +cd / +tar -xf judy.tar.gz +rm judy.tar.gz +cd /judy-${JUDY_VER} +CFLAGS="-O2 -s" CXXFLAGS="-O2 -s" ./configure +make +make install; diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh new file mode 100755 index 000000000..fc4e6d077 --- /dev/null +++ b/packaging/makeself/install-or-update.sh @@ -0,0 +1,234 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/functions.sh + +export LC_ALL=C +umask 002 + +# Be nice on production environments +renice 19 $$ >/dev/null 2>/dev/null + +# ----------------------------------------------------------------------------- + +STARTIT=1 + +while [ ! -z "${1}" ] +do + if [ "${1}" = "--dont-start-it" ] + then + STARTIT=0 + else + echo >&2 "Unknown option '${1}'. Ignoring it." + fi + shift +done + +deleted_stock_configs=0 +if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ] +then + + # ----------------------------------------------------------------------------- + progress "Deleting stock configuration files from user configuration directory" + + declare -A configs_signatures=() + source "system/configs.signatures" + + if [ ! -d etc/netdata ] + then + run mkdir -p etc/netdata + fi + + md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" + for x in $(find etc -type f) + do + # find it relative filename + f="${x/etc\/netdata\//}" + + # find the stock filename + t="${f/.conf.old/.conf}" + t="${t/.conf.orig/.conf}" + + if [ ! -z "${md5sum}" ] + then + # find the checksum of the existing file + md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)" + #echo >&2 "md5: ${md5}" + + # check if it matches + if [ "${configs_signatures[${md5}]}" = "${t}" ] + then + # it matches the default + run rm -f "${x}" + deleted_stock_configs=$(( deleted_stock_configs + 1 )) + fi + fi + done + + touch "etc/netdata/.installer-cleanup-of-stock-configs-done" +fi + +# ----------------------------------------------------------------------------- +progress "Attempt to create user/group netdata/netadata" + +NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody" +NETDATA_ADDED_TO_GROUPS="" +# Default user/group +NETDATA_USER="root" +NETDATA_GROUP="root" + +if portable_add_group netdata; then + if portable_add_user netdata "/opt/netdata"; then + progress "Add user netdata to required user groups" + for g in ${NETDATA_WANTED_GROUPS}; do + # shellcheck disable=SC2086 + portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}" || run_failed "Failed to add netdata user to secondary groups" + done + NETDATA_USER="netdata" + NETDATA_GROUP="netdata" + else + run_failed "I could not add user netdata, will be using root" + fi +else + run_failed "I could not add group netdata, so no user netdata will be created as well. 
Netdata run as root:root" +fi + +# ----------------------------------------------------------------------------- +progress "Check SSL certificates paths" + +if [ ! -f "/etc/ssl/certs/ca-certificates.crt" ] +then + if [ ! -f /opt/netdata/.curlrc ] + then + cacert= + + # CentOS + [ -f "/etc/ssl/certs/ca-bundle.crt" ] && cacert="/etc/ssl/certs/ca-bundle.crt" + + if [ ! -z "${cacert}" ] + then + echo "Creating /opt/netdata/.curlrc with cacert=${cacert}" + echo >/opt/netdata/.curlrc "cacert=${cacert}" + else + run_failed "Failed to find /etc/ssl/certs/ca-certificates.crt" + fi + fi +fi + + +# ----------------------------------------------------------------------------- +progress "Install logrotate configuration for netdata" + +install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata." + + +# ----------------------------------------------------------------------------- +progress "Install netdata at system init" + +install_netdata_service || run_failed "Cannot install netdata init service." + + +# ----------------------------------------------------------------------------- +progress "creating quick links" + +dir_should_be_link() { + local p="${1}" t="${2}" d="${3}" old + + old="${PWD}" + cd "${p}" || return 0 + + if [ -e "${d}" ] + then + if [ -h "${d}" ] + then + run rm "${d}" + else + run mv -f "${d}" "${d}.old.$$" + fi + fi + + run ln -s "${t}" "${d}" + cd "${old}" +} + +dir_should_be_link . bin sbin +dir_should_be_link usr ../bin bin +dir_should_be_link usr ../bin sbin +dir_should_be_link usr . local + +dir_should_be_link . etc/netdata netdata-configs +dir_should_be_link . usr/share/netdata/web netdata-web-files +dir_should_be_link . usr/libexec/netdata netdata-plugins +dir_should_be_link . var/lib/netdata netdata-dbs +dir_should_be_link . var/cache/netdata netdata-metrics +dir_should_be_link . var/log/netdata netdata-logs + +dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig + +if [ ${deleted_stock_configs} -gt 0 ] +then + dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" +fi + + +# ----------------------------------------------------------------------------- + +progress "create user config directories" + +for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" +do + if [ ! -d "etc/netdata/${x}" ] + then + run mkdir -p "etc/netdata/${x}" || exit 1 + fi +done + + +# ----------------------------------------------------------------------------- +progress "fix permissions" + +run chmod g+rx,o+rx /opt +run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata + + +# ----------------------------------------------------------------------------- + +progress "fix plugin permissions" + +for x in apps.plugin freeipmi.plugin ioping cgroup-network +do + f="usr/libexec/netdata/plugins.d/${x}" + + if [ -f "${f}" ] + then + run chown root:${NETDATA_GROUP} "${f}" + run chmod 4750 "${f}" + fi +done + +# fix the fping binary +if [ -f bin/fping ] +then + run chown root:${NETDATA_GROUP} bin/fping + run chmod 4750 bin/fping +fi + + +# ----------------------------------------------------------------------------- + +if [ ${STARTIT} -eq 0 ]; then + create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf" + netdata_banner "is installed now!" +else + progress "starting netdata" + + if ! restart_netdata "/opt/netdata/bin/netdata"; then + create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf" + netdata_banner "is installed and running now!" 
+ else + create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" + netdata_banner "is installed now!" + fi +fi +run chown "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" +run chmod 0664 "/opt/netdata/etc/netdata/netdata.conf" diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh new file mode 100755 index 000000000..06dc82f29 --- /dev/null +++ b/packaging/makeself/jobs/10-prepare-destination.install.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old" +[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old" + +run mkdir -p "${NETDATA_INSTALL_PATH}/bin" +run mkdir -p "${NETDATA_INSTALL_PATH}/usr" +run cd "${NETDATA_INSTALL_PATH}" +run ln -s bin sbin +run cd "${NETDATA_INSTALL_PATH}/usr" +run ln -s ../bin bin +run ln -s ../sbin sbin +run ln -s . local diff --git a/packaging/makeself/jobs/50-bash-4.4.18.install.sh b/packaging/makeself/jobs/50-bash-4.4.18.install.sh new file mode 100755 index 000000000..a762d37ae --- /dev/null +++ b/packaging/makeself/jobs/50-bash-4.4.18.install.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz" + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --without-bash-malloc \ + --enable-static-link \ + --enable-net-redirections \ + --enable-array-variables \ + --disable-profiling \ + --disable-nls \ +# --disable-rpath \ +# --enable-alias \ +# --enable-arith-for-command \ +# --enable-array-variables \ +# --enable-brace-expansion \ +# --enable-casemod-attributes \ +# --enable-casemod-expansions \ +# --enable-command-timing \ +# --enable-cond-command \ +# --enable-cond-regexp \ +# --enable-directory-stack \ +# --enable-dparen-arithmetic \ +# --enable-function-import \ +# --enable-glob-asciiranges-default \ +# --enable-help-builtin \ +# --enable-job-control \ +# --enable-net-redirections \ +# --enable-process-substitution \ +# --enable-progcomp \ +# --enable-prompt-string-decoding \ +# --enable-readline \ +# --enable-select \ + + +run make clean +run make -j$(find_processors) + +cat >examples/loadables/Makefile <<EOF +all: +clean: +install: +EOF + +run make install + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/bash +fi diff --git a/packaging/makeself/jobs/50-curl-7.60.0.install.sh b/packaging/makeself/jobs/50-curl-7.60.0.install.sh new file mode 100755 index 000000000..c91598251 --- /dev/null +++ b/packaging/makeself/jobs/50-curl-7.60.0.install.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "curl-curl-7_60_0" "https://github.com/curl/curl/archive/curl-7_60_0.tar.gz" + +export LDFLAGS="-static" +export PKG_CONFIG="pkg-config --static" + +run ./buildconf + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --enable-optimize \ + --disable-shared \ + --enable-static \ + --enable-http \ + --enable-proxy \ + --enable-ipv6 \ + --enable-cookies \ + ${NULL} + +# Curl autoconf does not honour the curl_LDFLAGS environment variable +run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile + +run make clean +run make -j$(find_processors) +run make install + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/curl +fi diff --git a/packaging/makeself/jobs/50-fping-4.2.install.sh b/packaging/makeself/jobs/50-fping-4.2.install.sh new file mode 100755 index 000000000..a137753d8 --- /dev/null +++ b/packaging/makeself/jobs/50-fping-4.2.install.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "fping-4.2" "https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz" + +export CFLAGS="-static" + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --enable-ipv4 \ + --enable-ipv6 \ + ${NULL} + +cat >doc/Makefile <<EOF +all: +clean: +install: +EOF + +run make clean +run make -j$(find_processors) +run make install + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/fping +fi diff --git a/packaging/makeself/jobs/50-ioping-1.1.install.sh b/packaging/makeself/jobs/50-ioping-1.1.install.sh new file mode 100755 index 000000000..83c778c15 --- /dev/null +++ b/packaging/makeself/jobs/50-ioping-1.1.install.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "netdata-ioping-43d15a5" "https://github.com/netdata/ioping/tarball/master" + +export CFLAGS="-static" + +run make clean +run make -j$(find_processors) +run mkdir -p ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/ +run install -o root -g root -m 4750 ioping ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/ + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/ioping +fi diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh new file mode 100755 index 000000000..71ea0f63a --- /dev/null +++ b/packaging/makeself/jobs/70-netdata-git.install.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. 
${NETDATA_MAKESELF_PATH}/functions.sh "${@}" || exit 1 + +cd "${NETDATA_SOURCE_PATH}" || exit 1 + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + export CFLAGS="-static -O3" +else + export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1" +# export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness" +fi + +run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \ + --dont-wait \ + --dont-start-it \ + ${NULL} + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/netdata + run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin + run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/cgroup-network +fi diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh new file mode 100755 index 000000000..f3056e6ac --- /dev/null +++ b/packaging/makeself/jobs/99-makeself.install.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +run cd "${NETDATA_SOURCE_PATH}" || exit 1 + +# ----------------------------------------------------------------------------- +# find the netdata version + +VERSION="$(git describe 2>/dev/null)" +if [ -z "${VERSION}" ]; then + VERSION=$(cat packaging/version) +fi + +if [ "${VERSION}" == "" ]; then + echo >&2 "Cannot find version number. Create makeself executable from source code with git tree structure." + exit 1 +fi + +# ----------------------------------------------------------------------------- +# copy the files needed by makeself installation + +run mkdir -p "${NETDATA_INSTALL_PATH}/system" + +run cp \ + packaging/makeself/post-installer.sh \ + packaging/makeself/install-or-update.sh \ + packaging/installer/functions.sh \ + configs.signatures \ + system/netdata-init-d \ + system/netdata-lsb \ + system/netdata-openrc \ + system/netdata.logrotate \ + system/netdata.service \ + "${NETDATA_INSTALL_PATH}/system/" + + +# ----------------------------------------------------------------------------- +# create a wrapper to start our netdata with a modified path + +run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv" + +run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \ + "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1 + +cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <<EOF +#!${NETDATA_INSTALL_PATH}/bin/bash +export NETDATA_BASH_LOADABLES="DISABLE" +export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}" +exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}" +EOF +run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata" + + +# ----------------------------------------------------------------------------- +# remove the links to allow untaring the archive + +run rm "${NETDATA_INSTALL_PATH}/sbin" \ + "${NETDATA_INSTALL_PATH}/usr/bin" \ + "${NETDATA_INSTALL_PATH}/usr/sbin" \ + "${NETDATA_INSTALL_PATH}/usr/local" + + +# ----------------------------------------------------------------------------- +# create the makeself archive + +run sed "s|NETDATA_VERSION|${VERSION}|g" <"${NETDATA_MAKESELF_PATH}/makeself.lsm" >"${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" + +run "${NETDATA_MAKESELF_PATH}/makeself.sh" \ + --gzip \ + --complevel 9 \ + --notemp \ + --needroot \ + --target "${NETDATA_INSTALL_PATH}" \ + --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \ + --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \ + --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \ + --help-header 
"${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \ + "${NETDATA_INSTALL_PATH}" \ + "${NETDATA_INSTALL_PATH}.gz.run" \ + "netdata, the real-time performance and health monitoring system" \ + ./system/post-installer.sh \ + ${NULL} + +run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" + +# ----------------------------------------------------------------------------- +# copy it to the netdata build dir + +FILE="netdata-${VERSION}.gz.run" + +run mkdir -p artifacts +run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}" + +[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run +run ln -s "artifacts/${FILE}" netdata-latest.gz.run + +echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'" diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh new file mode 100755 index 000000000..d77e1717c --- /dev/null +++ b/packaging/makeself/makeself-header.sh @@ -0,0 +1,554 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +cat << EOF > "$archname" +#!/bin/sh +# This script was generated using Makeself $MS_VERSION + +ORIG_UMASK=\`umask\` +if test "$KEEP_UMASK" = n; then + umask 077 +fi + +CRCsum="$CRCsum" +MD5="$MD5sum" +TMPROOT=\${TMPDIR:=/tmp} +USER_PWD="\$PWD"; export USER_PWD + +label="$LABEL" +script="$SCRIPT" +scriptargs="$SCRIPTARGS" +licensetxt="$LICENSE" +helpheader='$HELPHEADER' +targetdir="$archdirname" +filesizes="$filesizes" +keep="$KEEP" +nooverwrite="$NOOVERWRITE" +quiet="n" +accept="n" +nodiskspace="n" +export_conf="$EXPORT_CONF" + +print_cmd_arg="" +if type printf > /dev/null; then + print_cmd="printf" +elif test -x /usr/ucb/echo; then + print_cmd="/usr/ucb/echo" +else + print_cmd="echo" +fi + +if test -d /usr/xpg4/bin; then + PATH=/usr/xpg4/bin:\$PATH + export PATH +fi + +unset CDPATH + +MS_Printf() +{ + \$print_cmd \$print_cmd_arg "\$1" +} + +MS_PrintLicense() +{ + if test x"\$licensetxt" != x; then + echo "\$licensetxt" + if test x"\$accept" != xy; then + while true + do + MS_Printf "Please type y to accept, n otherwise: " + read yn + if test x"\$yn" = xn; then + keep=n + eval \$finish; exit 1 + break; + elif test x"\$yn" = xy; then + break; + fi + done + fi + fi +} + +MS_diskspace() +{ + ( + df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }' + ) +} + +MS_dd() +{ + blocks=\`expr \$3 / 1024\` + bytes=\`expr \$3 % 1024\` + dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\ + { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\ + test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null +} + +MS_dd_Progress() +{ + if test x"\$noprogress" = xy; then + MS_dd \$@ + return \$? 
+ fi + file="\$1" + offset=\$2 + length=\$3 + pos=0 + bsize=4194304 + while test \$bsize -gt \$length; do + bsize=\`expr \$bsize / 4\` + done + blocks=\`expr \$length / \$bsize\` + bytes=\`expr \$length % \$bsize\` + ( + dd ibs=\$offset skip=1 2>/dev/null + pos=\`expr \$pos \+ \$bsize\` + MS_Printf " 0%% " 1>&2 + if test \$blocks -gt 0; then + while test \$pos -le \$length; do + dd bs=\$bsize count=1 2>/dev/null + pcent=\`expr \$length / 100\` + pcent=\`expr \$pos / \$pcent\` + if test \$pcent -lt 100; then + MS_Printf "\b\b\b\b\b\b\b" 1>&2 + if test \$pcent -lt 10; then + MS_Printf " \$pcent%% " 1>&2 + else + MS_Printf " \$pcent%% " 1>&2 + fi + fi + pos=\`expr \$pos \+ \$bsize\` + done + fi + if test \$bytes -gt 0; then + dd bs=\$bytes count=1 2>/dev/null + fi + MS_Printf "\b\b\b\b\b\b\b" 1>&2 + MS_Printf " 100%% " 1>&2 + ) < "\$file" +} + +MS_Help() +{ + cat << EOH >&2 +\${helpheader}Makeself version $MS_VERSION + 1) Getting help or info about \$0 : + \$0 --help Print this message + \$0 --info Print embedded info : title, default target directory, embedded script ... + \$0 --lsm Print embedded lsm entry (or no LSM) + \$0 --list Print the list of files in the archive + \$0 --check Checks integrity of the archive + + 2) Running \$0 : + \$0 [options] [--] [additional arguments to embedded script] + with following options (in that order) + --confirm Ask before running embedded script + --quiet Do not print anything except error messages + --accept Accept the license + --noexec Do not run embedded script + --keep Do not erase target directory after running + the embedded script + --noprogress Do not show the progress during the decompression + --nox11 Do not spawn an xterm + --nochown Do not give the extracted files to the current user + --nodiskspace Do not check for available disk space + --target dir Extract directly to a target directory + directory path can be either absolute or relative + --tar arg1 [arg2 ...] Access the contents of the archive through the tar command + -- Following arguments will be passed to the embedded script +EOH +} + +MS_Check() +{ + OLD_PATH="\$PATH" + PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} + MD5_ARG="" + MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\` + test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\` + test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\` + PATH="\$OLD_PATH" + + if test x"\$quiet" = xn; then + MS_Printf "Verifying archive integrity..." + fi + offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\` + verb=\$2 + i=1 + for s in \$filesizes + do + crc=\`echo \$CRCsum | cut -d" " -f\$i\` + if test -x "\$MD5_PATH"; then + if test x"\`basename \$MD5_PATH\`" = xdigest; then + MD5_ARG="-a md5" + fi + md5=\`echo \$MD5 | cut -d" " -f\$i\` + if test x"\$md5" = x00000000000000000000000000000000; then + test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2 + else + md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`; + if test x"\$md5sum" != x"\$md5"; then + echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2 + exit 2 + else + test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2 + fi + crc="0000000000"; verb=n + fi + fi + if test x"\$crc" = x0000000000; then + test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." 
>&2 + else + sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\` + if test x"\$sum1" = x"\$crc"; then + test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2 + else + echo "Error in checksums: \$sum1 is different from \$crc" >&2 + exit 2; + fi + fi + i=\`expr \$i + 1\` + offset=\`expr \$offset + \$s\` + done + if test x"\$quiet" = xn; then + echo " All good." + fi +} + +UnTAR() +{ + if test x"\$quiet" = xn; then + tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; } + else + tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; } + fi +} + +finish=true +xterm_loop= +noprogress=$NOPROGRESS +nox11=$NOX11 +copy=$COPY +ownership=y +verbose=n + +initargs="\$@" + +while true +do + case "\$1" in + -h | --help) + MS_Help + exit 0 + ;; + -q | --quiet) + quiet=y + noprogress=y + shift + ;; + --accept) + accept=y + shift + ;; + --info) + echo Identification: "\$label" + echo Target directory: "\$targetdir" + echo Uncompressed size: $USIZE KB + echo Compression: $COMPRESS + echo Date of packaging: $DATE + echo Built with Makeself version $MS_VERSION on $OSTYPE + echo Build command was: "$MS_COMMAND" + if test x"\$script" != x; then + echo Script run after extraction: + echo " " \$script \$scriptargs + fi + if test x"$copy" = xcopy; then + echo "Archive will copy itself to a temporary location" + fi + if test x"$NEED_ROOT" = xy; then + echo "Root permissions required for extraction" + fi + if test x"$KEEP" = xy; then + echo "directory \$targetdir is permanent" + else + echo "\$targetdir will be removed after extraction" + fi + exit 0 + ;; + --dumpconf) + echo LABEL=\"\$label\" + echo SCRIPT=\"\$script\" + echo SCRIPTARGS=\"\$scriptargs\" + echo archdirname=\"$archdirname\" + echo KEEP=$KEEP + echo NOOVERWRITE=$NOOVERWRITE + echo COMPRESS=$COMPRESS + echo filesizes=\"\$filesizes\" + echo CRCsum=\"\$CRCsum\" + echo MD5sum=\"\$MD5\" + echo OLDUSIZE=$USIZE + echo OLDSKIP=`expr $SKIP + 1` + exit 0 + ;; + --lsm) +cat << EOLSM +EOF +eval "$LSM_CMD" +cat << EOF >> "$archname" +EOLSM + exit 0 + ;; + --list) + echo Target directory: \$targetdir + offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + for s in \$filesizes + do + MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t + offset=\`expr \$offset + \$s\` + done + exit 0 + ;; + --tar) + offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + arg1="\$2" + if ! shift 2; then MS_Help; exit 1; fi + for s in \$filesizes + do + MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@" + offset=\`expr \$offset + \$s\` + done + exit 0 + ;; + --check) + MS_Check "\$0" y + exit 0 + ;; + --confirm) + verbose=y + shift + ;; + --noexec) + script="" + shift + ;; + --keep) + keep=y + shift + ;; + --target) + keep=y + targetdir=\${2:-.} + if ! shift 2; then MS_Help; exit 1; fi + ;; + --noprogress) + noprogress=y + shift + ;; + --nox11) + nox11=y + shift + ;; + --nochown) + ownership=n + shift + ;; + --nodiskspace) + nodiskspace=y + shift + ;; + --xwin) + if test "$NOWAIT" = n; then + finish="echo Press Return to close this window...; read junk" + fi + xterm_loop=1 + shift + ;; + --phase2) + copy=phase2 + shift + ;; + --) + shift + break ;; + -*) + echo Unrecognized flag : "\$1" >&2 + MS_Help + exit 1 + ;; + *) + break ;; + esac +done + +if test x"\$quiet" = xy -a x"\$verbose" = xy; then + echo Cannot be verbose and quiet at the same time. 
>&2 + exit 1 +fi + +if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then + echo "Administrative privileges required for this archive (use su or sudo)" >&2 + exit 1 +fi + +if test x"\$copy" \!= xphase2; then + MS_PrintLicense +fi + +case "\$copy" in +copy) + tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$ + mkdir "\$tmpdir" || { + echo "Could not create temporary directory \$tmpdir" >&2 + exit 1 + } + SCRIPT_COPY="\$tmpdir/makeself" + echo "Copying to a temporary location..." >&2 + cp "\$0" "\$SCRIPT_COPY" + chmod +x "\$SCRIPT_COPY" + cd "\$TMPROOT" + exec "\$SCRIPT_COPY" --phase2 -- \$initargs + ;; +phase2) + finish="\$finish ; rm -rf \`dirname \$0\`" + ;; +esac + +if test x"\$nox11" = xn; then + if tty -s; then # Do we have a terminal? + : + else + if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X? + if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable + GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology" + for a in \$GUESS_XTERMS; do + if type \$a >/dev/null 2>&1; then + XTERM=\$a + break + fi + done + chmod a+x \$0 || echo Please add execution rights on \$0 + if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal! + exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs" + else + exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs" + fi + fi + fi + fi +fi + +if test x"\$targetdir" = x.; then + tmpdir="." +else + if test x"\$keep" = xy; then + if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then + echo "Target directory \$targetdir already exists, aborting." >&2 + exit 1 + fi + if test x"\$quiet" = xn; then + echo "Creating directory \$targetdir" >&2 + fi + tmpdir="\$targetdir" + dashp="-p" + else + tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM" + dashp="" + fi + mkdir \$dashp \$tmpdir || { + echo 'Cannot create target directory' \$tmpdir >&2 + echo 'You should try option --target dir' >&2 + eval \$finish + exit 1 + } +fi + +location="\`pwd\`" +if test x"\$SETUP_NOCHECK" != x1; then + MS_Check "\$0" +fi +offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + +if test x"\$verbose" = xy; then + MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] " + read yn + if test x"\$yn" = xn; then + eval \$finish; exit 1 + fi +fi + +if test x"\$quiet" = xn; then + MS_Printf "Uncompressing \$label" +fi +res=3 +if test x"\$keep" = xn; then + trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15 +fi + +if test x"\$nodiskspace" = xn; then + leftspace=\`MS_diskspace \$tmpdir\` + if test -n "\$leftspace"; then + if test "\$leftspace" -lt $USIZE; then + echo + echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2 + echo "Use --nodiskspace option to skip this check and proceed anyway" >&2 + if test x"\$keep" = xn; then + echo "Consider setting TMPDIR to a directory with more free space." + fi + eval \$finish; exit 1 + fi + fi +fi + +for s in \$filesizes +do + if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then + if test x"\$ownership" = xy; then + (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .) 
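+	    # ownership defaults to y; passing --nochown to the generated archive sets it to n and skips this re-owning step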
+ fi + else + echo >&2 + echo "Unable to decompress \$0" >&2 + eval \$finish; exit 1 + fi + offset=\`expr \$offset + \$s\` +done +if test x"\$quiet" = xn; then + echo +fi + +cd "\$tmpdir" +res=0 +if test x"\$script" != x; then + if test x"\$export_conf" = x"y"; then + MS_BUNDLE="\$0" + MS_LABEL="\$label" + MS_SCRIPT="\$script" + MS_SCRIPTARGS="\$scriptargs" + MS_ARCHDIRNAME="\$archdirname" + MS_KEEP="\$KEEP" + MS_NOOVERWRITE="\$NOOVERWRITE" + MS_COMPRESS="\$COMPRESS" + export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS + export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS + fi + + if test x"\$verbose" = x"y"; then + MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] " + read yn + if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then + eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?; + fi + else + eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$? + fi + if test "\$res" -ne 0; then + test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2 + fi +fi +if test x"\$keep" = xn; then + cd \$TMPROOT + /bin/rm -rf \$tmpdir +fi +eval \$finish; exit \$res +EOF diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt new file mode 100644 index 000000000..bf482c465 --- /dev/null +++ b/packaging/makeself/makeself-help-header.txt @@ -0,0 +1,44 @@ + + ^ + |.-. .-. .-. .-. . netdata + | '-' '-' '-' '-' real-time performance monitoring, done right! + +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> + + (C) Copyright 2017, Costa Tsaousis + All rights reserved + Released under GPL v3+ + + You are about to install netdata to this system. + netdata will be installed at: + + /opt/netdata + + The following changes will be made to your system: + + # USERS / GROUPS + User 'netdata' and group 'netdata' will be added, if not present. + + # LOGROTATE + This file will be installed if logrotate is present. + + - /etc/logrotate.d/netdata + + # SYSTEM INIT + This file will be installed if this system runs with systemd: + + - /lib/systemd/system/netdata.service + + or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: + + - /etc/init.d/netdata will be created + + + This package can also update a netdata installation that has been + created with another version of it. + + Your netdata configuration will be retained. + After installation, netdata will be (re-)started. + + netdata re-distributes a lot of open source software components. + Check its full license at: + https://github.com/netdata/netdata/blob/master/LICENSE.md diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt new file mode 100644 index 000000000..bf482c465 --- /dev/null +++ b/packaging/makeself/makeself-license.txt @@ -0,0 +1,44 @@ + + ^ + |.-. .-. .-. .-. . netdata + | '-' '-' '-' '-' real-time performance monitoring, done right! + +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> + + (C) Copyright 2017, Costa Tsaousis + All rights reserved + Released under GPL v3+ + + You are about to install netdata to this system. + netdata will be installed at: + + /opt/netdata + + The following changes will be made to your system: + + # USERS / GROUPS + User 'netdata' and group 'netdata' will be added, if not present. + + # LOGROTATE + This file will be installed if logrotate is present. 
+ + - /etc/logrotate.d/netdata + + # SYSTEM INIT + This file will be installed if this system runs with systemd: + + - /lib/systemd/system/netdata.service + + or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: + + - /etc/init.d/netdata will be created + + + This package can also update a netdata installation that has been + created with another version of it. + + Your netdata configuration will be retained. + After installation, netdata will be (re-)started. + + netdata re-distributes a lot of open source software components. + Check its full license at: + https://github.com/netdata/netdata/blob/master/LICENSE.md diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm new file mode 100644 index 000000000..6bd4703db --- /dev/null +++ b/packaging/makeself/makeself.lsm @@ -0,0 +1,16 @@ +Begin3 +Title: netdata +Version: NETDATA_VERSION +Description: netdata is a system for distributed real-time performance and health monitoring. + It provides unparalleled insights, in real-time, of everything happening on the + system it runs (including applications such as web and database servers), using + modern interactive web dashboards. netdata is fast and efficient, designed to + permanently run on all systems (physical & virtual servers, containers, IoT + devices), without disrupting their core function. +Keywords: real-time performance and health monitoring +Author: Costa Tsaousis (costa@tsaousis.gr) +Maintained-by: Costa Tsaousis (costa@tsaousis.gr) +Original-site: https://my-netdata.io/ +Platform: Unix +Copying-policy: GPL +End diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh new file mode 100755 index 000000000..f3cb69976 --- /dev/null +++ b/packaging/makeself/makeself.sh @@ -0,0 +1,621 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Makeself version 2.3.x +# by Stephane Peter <megastep@megastep.org> +# +# Utility to create self-extracting tar.gz archives. +# The resulting archive is a file holding the tar.gz archive with +# a small Shell script stub that uncompresses the archive to a temporary +# directory and then executes a given script from withing that directory. +# +# Makeself home page: http://makeself.io/ +# +# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain. +# +# Version history : +# - 1.0 : Initial public release +# - 1.1 : The archive can be passed parameters that will be passed on to +# the embedded script, thanks to John C. Quillan +# - 1.2 : Package distribution, bzip2 compression, more command line options, +# support for non-temporary archives. Ideas thanks to Francois Petitjean +# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean: +# Support for no compression (--nocomp), script is no longer mandatory, +# automatic launch in an xterm, optional verbose output, and -target +# archive option to indicate where to extract the files. +# - 1.4 : Improved UNIX compatibility (Francois Petitjean) +# Automatic integrity checking, support of LSM files (Francois Petitjean) +# - 1.5 : Many bugfixes. Optionally disable xterm spawning. +# - 1.5.1 : More bugfixes, added archive options -list and -check. +# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big +# archives (Quake III demo) +# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm. +# More verbosity in xterms and check for embedded command's return value. +# Bugfix for Debian 2.0 systems that have a different "print" command. +# - 1.5.4 : Many bugfixes. 
Print out a message if the extraction failed. +# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to +# bypass checksum verification of archives. +# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon) +# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports. +# - 2.0.1 : Added --copy +# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates. +# Added --nochown for archives +# Stopped doing redundant checksums when not necesary +# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command +# Cleaned up the code to handle error codes from compress. Simplified the extraction code. +# - 2.1.2 : Some bug fixes. Use head -n to avoid problems. +# - 2.1.3 : Bug fixes with command line when spawning terminals. +# Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive. +# Added --noexec to prevent execution of embedded scripts. +# Added --nomd5 and --nocrc to avoid creating checksums in archives. +# Added command used to create the archive in --info output. +# Run the embedded script through eval. +# - 2.1.4 : Fixed --info output. +# Generate random directory name when extracting files to . to avoid problems. (Jason Trent) +# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent) +# Avoid some race conditions (Ludwig Nussel) +# Unset the $CDPATH variable to avoid problems if it is set. (Debian) +# Better handling of dot files in the archive directory. +# - 2.1.5 : Made the md5sum detection consistent with the header code. +# Check for the presence of the archive directory +# Added --encrypt for symmetric encryption through gpg (Eric Windisch) +# Added support for the digest command on Solaris 10 for MD5 checksums +# Check for available disk space before extracting to the target directory (Andreas Schweitzer) +# Allow extraction to run asynchronously (patch by Peter Hatch) +# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo) +# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spining cursor (Guy Baconniere) +# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere) +# Added --target dir to allow extracting directly to a target directory (Guy Baconniere) +# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details. +# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky) +# +# (C) 1998-2017 by Stephane Peter <megastep@megastep.org> +# +# This software is released under the terms of the GNU GPL version 2 and above +# Please read the license at http://www.gnu.org/copyleft/gpl.html +# + +MS_VERSION=2.3.1 +MS_COMMAND="$0" +unset CDPATH + +for f in "${1+"$@"}"; do + MS_COMMAND="$MS_COMMAND \\\\ + \\\"$f\\\"" +done + +# For Solaris systems +if test -d /usr/xpg4/bin; then + PATH=/usr/xpg4/bin:$PATH + export PATH +fi + +# Procedures + +MS_Usage() +{ + echo "Usage: $0 [params] archive_dir file_name label startup_script [args]" + echo "params can be one or more of the following :" + echo " --version | -v : Print out Makeself version number and exit" + echo " --help | -h : Print out this help message" + echo " --tar-quietly : Suppress verbose output from the tar command" + echo " --quiet | -q : Do not print any messages other than errors." 
+ echo " --gzip : Compress using gzip (default if detected)" + echo " --pigz : Compress with pigz" + echo " --bzip2 : Compress using bzip2 instead of gzip" + echo " --pbzip2 : Compress using pbzip2 instead of gzip" + echo " --xz : Compress using xz instead of gzip" + echo " --lzo : Compress using lzop instead of gzip" + echo " --lz4 : Compress using lz4 instead of gzip" + echo " --compress : Compress using the UNIX 'compress' command" + echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)" + echo " --base64 : Instead of compressing, encode the data using base64" + echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG" + echo " --gpg-asymmetric-encrypt-sign" + echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG" + echo " --gpg-extra opt : Append more options to the gpg command line" + echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL" + echo " --nocomp : Do not compress the data" + echo " --notemp : The archive will create archive_dir in the" + echo " current directory and uncompress in ./archive_dir" + echo " --needroot : Check that the root user is extracting the archive before proceeding" + echo " --copy : Upon extraction, the archive will first copy itself to" + echo " a temporary directory" + echo " --append : Append more files to an existing Makeself archive" + echo " The label and startup scripts will then be ignored" + echo " --target dir : Extract directly to a target directory" + echo " directory path can be either absolute or relative" + echo " --nooverwrite : Do not extract the archive if the specified target directory exists" + echo " --current : Files will be extracted to the current directory" + echo " Both --current and --target imply --notemp" + echo " --tar-extra opt : Append more options to the tar command line" + echo " --untar-extra opt : Append more options to the during the extraction of the tar archive" + echo " --nomd5 : Don't calculate an MD5 for archive" + echo " --nocrc : Don't calculate a CRC for archive" + echo " --header file : Specify location of the header script" + echo " --follow : Follow the symlinks in the archive" + echo " --noprogress : Do not show the progress during the decompression" + echo " --nox11 : Disable automatic spawn of a xterm" + echo " --nowait : Do not wait for user input after executing embedded" + echo " program from an xterm" + echo " --lsm file : LSM file describing the package" + echo " --license file : Append a license file" + echo " --help-header file : Add a header to the archive's --help output" + echo " --packaging-date date" + echo " : Use provided string as the packaging date" + echo " instead of the current date." + echo + echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive." + echo " --export-conf : Export configuration variables to startup_script" + echo + echo "Do not forget to give a fully qualified startup script name" + echo "(i.e. with a ./ prefix if inside the archive)." + exit 1 +} + +# Default settings +if type gzip 2>&1 > /dev/null; then + COMPRESS=gzip +else + COMPRESS=Unix +fi +COMPRESS_LEVEL=9 +KEEP=n +CURRENT=n +NOX11=n +NOWAIT=n +APPEND=n +TAR_QUIETLY=n +KEEP_UMASK=n +QUIET=n +NOPROGRESS=n +COPY=none +NEED_ROOT=n +TAR_ARGS=cvf +TAR_EXTRA="" +GPG_EXTRA="" +DU_ARGS=-ks +HEADER=`dirname "$0"`/makeself-header.sh +TARGETDIR="" +NOOVERWRITE=n +DATE=`LC_ALL=C date` +EXPORT_CONF=n + +# LSM file stuff +LSM_CMD="echo No LSM. 
>> \"\$archname\"" + +while true +do + case "$1" in + --version | -v) + echo Makeself version $MS_VERSION + exit 0 + ;; + --pbzip2) + COMPRESS=pbzip2 + shift + ;; + --bzip2) + COMPRESS=bzip2 + shift + ;; + --gzip) + COMPRESS=gzip + shift + ;; + --pigz) + COMPRESS=pigz + shift + ;; + --xz) + COMPRESS=xz + shift + ;; + --lzo) + COMPRESS=lzo + shift + ;; + --lz4) + COMPRESS=lz4 + shift + ;; + --compress) + COMPRESS=Unix + shift + ;; + --base64) + COMPRESS=base64 + shift + ;; + --gpg-encrypt) + COMPRESS=gpg + shift + ;; + --gpg-asymmetric-encrypt-sign) + COMPRESS=gpg-asymmetric + shift + ;; + --gpg-extra) + GPG_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --ssl-encrypt) + COMPRESS=openssl + shift + ;; + --nocomp) + COMPRESS=none + shift + ;; + --complevel) + COMPRESS_LEVEL="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --notemp) + KEEP=y + shift + ;; + --copy) + COPY=copy + shift + ;; + --current) + CURRENT=y + KEEP=y + shift + ;; + --tar-extra) + TAR_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --untar-extra) + UNTAR_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --target) + TARGETDIR="$2" + KEEP=y + if ! shift 2; then MS_Help; exit 1; fi + ;; + --nooverwrite) + NOOVERWRITE=y + shift + ;; + --needroot) + NEED_ROOT=y + shift + ;; + --header) + HEADER="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --license) + LICENSE=`cat $2` + if ! shift 2; then MS_Help; exit 1; fi + ;; + --follow) + TAR_ARGS=cvhf + DU_ARGS=-ksL + shift + ;; + --noprogress) + NOPROGRESS=y + shift + ;; + --nox11) + NOX11=y + shift + ;; + --nowait) + NOWAIT=y + shift + ;; + --nomd5) + NOMD5=y + shift + ;; + --nocrc) + NOCRC=y + shift + ;; + --append) + APPEND=y + shift + ;; + --lsm) + LSM_CMD="cat \"$2\" >> \"\$archname\"" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --packaging-date) + DATE="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --help-header) + HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2` + if ! shift 2; then MS_Help; exit 1; fi + [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER +" + ;; + --tar-quietly) + TAR_QUIETLY=y + shift + ;; + --keep-umask) + KEEP_UMASK=y + shift + ;; + --export-conf) + EXPORT_CONF=y + shift + ;; + -q | --quiet) + QUIET=y + shift + ;; + -h | --help) + MS_Usage + ;; + -*) + echo Unrecognized flag : "$1" + MS_Usage + ;; + *) + break + ;; + esac +done + +if test $# -lt 1; then + MS_Usage +else + if test -d "$1"; then + archdir="$1" + else + echo "Directory $1 does not exist." >&2 + exit 1 + fi +fi +archname="$2" + +if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then + if test "$TAR_ARGS" = "cvf"; then + TAR_ARGS="cf" + elif test "$TAR_ARGS" = "cvhf";then + TAR_ARGS="chf" + fi +fi + +if test "$APPEND" = y; then + if test $# -lt 2; then + MS_Usage + fi + + # Gather the info from the original archive + OLDENV=`sh "$archname" --dumpconf` + if test $? -ne 0; then + echo "Unable to update archive: $archname" >&2 + exit 1 + else + eval "$OLDENV" + fi +else + if test "$KEEP" = n -a $# = 3; then + echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2 + echo >&2 + MS_Usage + fi + # We don't want to create an absolute directory unless a target directory is defined + if test "$CURRENT" = y; then + archdirname="." 
+ elif test x$TARGETDIR != x; then + archdirname="$TARGETDIR" + else + archdirname=`basename "$1"` + fi + + if test $# -lt 3; then + MS_Usage + fi + + LABEL="$3" + SCRIPT="$4" + test "x$SCRIPT" = x || shift 1 + shift 3 + SCRIPTARGS="$*" +fi + +if test "$KEEP" = n -a "$CURRENT" = y; then + echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2 + exit 1 +fi + +case $COMPRESS in +gzip) + GZIP_CMD="gzip -c$COMPRESS_LEVEL" + GUNZIP_CMD="gzip -cd" + ;; +pigz) + GZIP_CMD="pigz -$COMPRESS_LEVEL" + GUNZIP_CMD="gzip -cd" + ;; +pbzip2) + GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL" + GUNZIP_CMD="bzip2 -d" + ;; +bzip2) + GZIP_CMD="bzip2 -$COMPRESS_LEVEL" + GUNZIP_CMD="bzip2 -d" + ;; +xz) + GZIP_CMD="xz -c$COMPRESS_LEVEL" + GUNZIP_CMD="xz -d" + ;; +lzo) + GZIP_CMD="lzop -c$COMPRESS_LEVEL" + GUNZIP_CMD="lzop -d" + ;; +lz4) + GZIP_CMD="lz4 -c$COMPRESS_LEVEL" + GUNZIP_CMD="lz4 -d" + ;; +base64) + GZIP_CMD="base64" + GUNZIP_CMD="base64 -d -i" + ;; +gpg) + GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL" + GUNZIP_CMD="gpg -d" + ;; +gpg-asymmetric) + GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es" + GUNZIP_CMD="gpg --yes -d" + ;; +openssl) + GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256" + GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256" + ;; +Unix) + GZIP_CMD="compress -cf" + GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd" + ;; +none) + GZIP_CMD="cat" + GUNZIP_CMD="cat" + ;; +esac + +tmpfile="${TMPDIR:=/tmp}/mkself$$" + +if test -f "$HEADER"; then + oldarchname="$archname" + archname="$tmpfile" + # Generate a fake header to count its lines + SKIP=0 + . "$HEADER" + SKIP=`cat "$tmpfile" |wc -l` + # Get rid of any spaces + SKIP=`expr $SKIP` + rm -f "$tmpfile" + if test "$QUIET" = "n";then + echo Header is $SKIP lines long >&2 + fi + + archname="$oldarchname" +else + echo "Unable to open header file: $HEADER" >&2 + exit 1 +fi + +if test "$QUIET" = "n";then + echo +fi + +if test "$APPEND" = n; then + if test -f "$archname"; then + echo "WARNING: Overwriting existing file: $archname" >&2 + fi +fi + +USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'` + +if test "." = "$archdirname"; then + if test "$KEEP" = n; then + archdirname="makeself-$$-`date +%Y%m%d%H%M%S`" + fi +fi + +test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; } +if test "$QUIET" = "n";then + echo About to compress $USIZE KB of data... + echo Adding files to archive named \"$archname\"... +fi +exec 3<> "$tmpfile" +( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . 
| eval "$GZIP_CMD" >&3 ) ) || \ + { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; } +exec 3>&- # try to close the archive + +fsize=`cat "$tmpfile" | wc -c | tr -d " "` + +# Compute the checksums + +md5sum=00000000000000000000000000000000 +crcsum=0000000000 + +if test "$NOCRC" = y; then + if test "$QUIET" = "n";then + echo "skipping crc at user request" + fi +else + crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1` + if test "$QUIET" = "n";then + echo "CRC: $crcsum" + fi +fi + +if test "$NOMD5" = y; then + if test "$QUIET" = "n";then + echo "skipping md5sum at user request" + fi +else + # Try to locate a MD5 binary + OLD_PATH=$PATH + PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} + MD5_ARG="" + MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum` + test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5` + test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest` + PATH=$OLD_PATH + if test -x "$MD5_PATH"; then + if test `basename ${MD5_PATH}`x = digestx; then + MD5_ARG="-a md5" + fi + md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`; + if test "$QUIET" = "n";then + echo "MD5: $md5sum" + fi + else + if test "$QUIET" = "n";then + echo "MD5: none, MD5 command not found" + fi + fi +fi + +if test "$APPEND" = y; then + mv "$archname" "$archname".bak || exit + + # Prepare entry for new archive + filesizes="$filesizes $fsize" + CRCsum="$CRCsum $crcsum" + MD5sum="$MD5sum $md5sum" + USIZE=`expr $USIZE + $OLDUSIZE` + # Generate the header + . "$HEADER" + # Append the original data + tail -n +$OLDSKIP "$archname".bak >> "$archname" + # Append the new data + cat "$tmpfile" >> "$archname" + + chmod +x "$archname" + rm -f "$archname".bak + if test "$QUIET" = "n";then + echo Self-extractable archive \"$archname\" successfully updated. + fi +else + filesizes="$fsize" + CRCsum="$crcsum" + MD5sum="$md5sum" + + # Generate the header + . "$HEADER" + + # Append the compressed tar data after the stub + if test "$QUIET" = "n";then + echo + fi + cat "$tmpfile" >> "$archname" + chmod +x "$archname" + if test "$QUIET" = "n";then + echo Self-extractable archive \"$archname\" successfully created. + fi +fi +rm -f "$tmpfile" diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh new file mode 100755 index 000000000..38cc41ef7 --- /dev/null +++ b/packaging/makeself/post-installer.sh @@ -0,0 +1,11 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-3.0-or-later + +# This script is started using the shell of the system +# and executes our 'install-or-update.sh' script +# using the netdata supplied, statically linked BASH +# +# so, at 'install-or-update.sh' we are always sure +# we run under BASH v4. 
+ +./bin/bash system/install-or-update.sh "${@}" diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh new file mode 100755 index 000000000..f7507c2d2 --- /dev/null +++ b/packaging/makeself/run-all-jobs.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +LC_ALL=C +umask 002 + +# be nice +renice 19 $$ >/dev/null 2>/dev/null + +# ----------------------------------------------------------------------------- +# prepare the environment for the jobs + +# installation directory +export NETDATA_INSTALL_PATH="${1-/opt/netdata}" + +# our source directory +export NETDATA_MAKESELF_PATH="$(dirname "${0}")" +if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] + then + export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" +fi + +# netdata source directory +export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.." + +# make sure ${NULL} is empty +export NULL= + +# ----------------------------------------------------------------------------- + +cd "${NETDATA_MAKESELF_PATH}" || exit 1 + +. ./functions.sh "${@}" || exit 1 + +for x in jobs/*.install.sh +do + progress "running ${x}" + "${x}" "${NETDATA_INSTALL_PATH}" +done + +echo >&2 "All jobs for static packaging done successfully." +exit 0 diff --git a/packaging/manual_nightly_deployment.sh b/packaging/manual_nightly_deployment.sh new file mode 100755 index 000000000..a0999bb18 --- /dev/null +++ b/packaging/manual_nightly_deployment.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +# +# This tool allows netdata team to manually deploy nightlies +# It emulates the nightly operations required for a new version to be published for our users +# +# Copyright: SPDX-License-Identifier: GPL-3.0-or-later +# +# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud> +# +set -e + +# If we are not in netdata git repo, at the top level directory, fail +TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") +CWD=$(git rev-parse --show-cdup || echo "") +if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then + echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository" + echo "Changelog generation process aborted" + exit 1 +fi + +if [ $# -lt 1 ] || [ $# -gt 2 ]; then + echo "Run as ./$(basename "$0") [docker|gcs]|all] from the top level directory of netdata GIT repository" + exit 1 +fi + +GSUTIL_BINARY=$(command -v gsutil 2> /dev/null) +if [ -z "${GSUTIL_BINARY}" ]; then + echo "No gsutil utility available, you need gsutil deployed to manually deploy to GCS" + exit 1 +fi; + +# Function declarations +publish_docker() { + + # Ensure REPOSITORY present + if [ -z "${REPOSITORY}" ]; then + echo "Please provide the repository to deploy the containers:" + read -r REPOSITORY + export REPOSITORY + else + echo "Docker publishing to ${REPOSITORY}" + fi + + # Ensure DOCKER_USERNAME present + if [ -z "${DOCKER_USERNAME}" ]; then + echo "For repository ${REPOSITORY}, Please provide the docker USERNAME to use:" + read -r DOCKER_USERNAME + export DOCKER_USERNAME + else + echo "Using docker username ${DOCKER_USERNAME}" + fi + + # Ensure DOCKER_PASS present + if [ -z "${DOCKER_PASS}" ]; then + echo "Username ${DOCKER_USERNAME} received, now give me the password:" + read -r -s DOCKER_PASS + export DOCKER_PASS + else + echo "Docker password has already been set to env, using that" + fi + + echo "Building Docker images.." + packaging/docker/build.sh + + echo "Publishing Docker images.." 
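+	# publish.sh is expected to push the images built above, using the REPOSITORY, DOCKER_USERNAME and DOCKER_PASS values collected earlier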
+ packaging/docker/publish.sh +} + +publish_nightly_binaries() { + echo "Publishing nightly binaries to GCS" + + echo "Please select the bucket to sync, from the ones available to you:" + bucket_list=$(${GSUTIL_BINARY} list | tr '\n' ' ') + declare -A buckets + idx=0 + for abucket in ${bucket_list}; do + echo "${idx}. ${abucket}" + buckets["${idx}"]=${abucket} + ((idx=idx+1)) + done + read -p"Selection>" -r -n 1 selected_bucket + + echo "Ok!" + echo "Syncing artifacts directory contents with GCS bucket: ${buckets[${selected_bucket}]}" + if [ -d artifacts ]; then + ${GSUTIL_BINARY} -m rsync -r artifacts "${buckets["${selected_bucket}"]}" + echo "GCS Sync complete!" + else + echo "Directory artifacts does not exist, nothing to do on GCS" + fi +} + +prepare_and_publish_gcs() { + # Prepare the artifacts directory + echo "Preparing artifacts directory contents" + .travis/create_artifacts.sh + + # Publish it to GCS + publish_nightly_binaries + + # Clean up + echo "Cleaning up repository" + make clean || echo "Nothing to clean" + make distclean || echo "Nothing to distclean" + rm -rf artifacts +} + +# Mandatory variable declarations +export TRAVIS_REPO_SLUG="netdata/netdata" + +echo "Manual nightly deployment procedure started" +case "$1" in + "docker") + publish_docker + ;; + "gcs") + prepare_and_publish_gcs + ;; + "all") + publish_docker + prepare_and_publish_gcs + ;; + *) + echo "ERROR: Invalid request parameter $1. Valid values are: docker, gcs, all" + ;; +esac +echo "Manual nightly deployment completed!" diff --git a/packaging/version b/packaging/version index 79f9beba8..440ddd8f1 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.14.0 +v1.15.0 |