path: root/packaging
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-13 05:05:16 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-13 05:05:16 +0000
commit     8f5d8f3de6cae180af37917ef978a4affc2cd464 (patch)
tree       4bfe1abc6d19c2dd635d1b83cc0e73d0aa6904ac /packaging
parent     Adding upstream version 1.17.0. (diff)
download   netdata-8f5d8f3de6cae180af37917ef978a4affc2cd464.tar.xz
           netdata-8f5d8f3de6cae180af37917ef978a4affc2cd464.zip
Adding upstream version 1.17.1. (upstream/1.17.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'packaging')
-rw-r--r--  packaging/DISTRIBUTIONS.md  254
-rw-r--r--  packaging/docker/Dockerfile  92
-rw-r--r--  packaging/docker/README.md  251
-rwxr-xr-x  packaging/docker/build-test.sh  74
-rwxr-xr-x  packaging/docker/build.sh  72
-rwxr-xr-x  packaging/docker/check_login.sh  41
-rwxr-xr-x  packaging/docker/publish.sh  116
-rwxr-xr-x  packaging/docker/run.sh  25
-rw-r--r--  packaging/installer/README.md  23
-rw-r--r--  packaging/maintainers/README.md  80
-rw-r--r--  packaging/makeself/README.md  48
-rwxr-xr-x  packaging/makeself/build-x86_64-static.sh  42
-rwxr-xr-x  packaging/makeself/build.sh  61
-rwxr-xr-x  packaging/makeself/functions.sh  62
-rwxr-xr-x  packaging/makeself/install-alpine-packages.sh  48
-rwxr-xr-x  packaging/makeself/install-or-update.sh  243
-rwxr-xr-x  packaging/makeself/jobs/10-prepare-destination.install.sh  16
-rwxr-xr-x  packaging/makeself/jobs/50-bash-4.4.18.install.sh  54
-rwxr-xr-x  packaging/makeself/jobs/50-curl-7.60.0.install.sh  34
-rwxr-xr-x  packaging/makeself/jobs/50-fping-4.2.install.sh  29
-rwxr-xr-x  packaging/makeself/jobs/50-ioping-1.1.install.sh  18
-rwxr-xr-x  packaging/makeself/jobs/70-netdata-git.install.sh  30
-rwxr-xr-x  packaging/makeself/jobs/99-makeself.install.sh  99
-rwxr-xr-x  packaging/makeself/makeself-header.sh  557
-rw-r--r--  packaging/makeself/makeself-help-header.txt  44
-rw-r--r--  packaging/makeself/makeself-license.txt  44
-rw-r--r--  packaging/makeself/makeself.lsm  16
-rwxr-xr-x  packaging/makeself/makeself.sh  621
-rwxr-xr-x  packaging/makeself/post-installer.sh  11
-rwxr-xr-x  packaging/makeself/run-all-jobs.sh  42
-rwxr-xr-x  packaging/manual_nightly_deployment.sh  127
-rw-r--r--  packaging/version  2
32 files changed, 3265 insertions, 11 deletions
diff --git a/packaging/DISTRIBUTIONS.md b/packaging/DISTRIBUTIONS.md
new file mode 100644
index 000000000..286e2dd10
--- /dev/null
+++ b/packaging/DISTRIBUTIONS.md
@@ -0,0 +1,254 @@
+# Netdata distribution support matrix
+
+![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
+
+In the following table we list Netdata's officially supported operating systems. We detail the distributions, flavors, and the level of support Netdata is currently able to provide.
+
+The following table is a work in progress. We have settled on the list of distributions
+that we currently support and we are working on documenting our current state so that our users
+have complete visibility over the range of support.
+
+**Legend**:
+
+- **Version**: Operating system version supported
+- **Family**: The family that the OS belongs to
+- **CI: Smoke Testing**: Smoke testing has been implemented on our CI, to prevent broken code from reaching our users
+- **CI: Testing**: Testing has been implemented to prevent broken or problematic code from reaching our users
+- **CD**: Continuous deployment support has been fully enabled for this operating system
+- **.DEB**: We provide a `.DEB` package for that particular operating system
+- **.RPM**: We provide a `.RPM` package for that particular operating system
+- **Installer**: Running Netdata from source, using our installer, works for this operating system
+- **Kickstart**: Kickstart installation works for this operating system
+- **Kickstart64**: Kickstart static64 installation works for this operating system
+- **Community**: This operating system receives community support, such as packaging maintainers, contributors, and so on
+
+## AMD64 Architecture
+
+| Version | Family | CI: Smoke testing | CI: Testing | CD | .DEB | .RPM | Installer | Kickstart | Kickstart64 | Community
+:------------------: | :------------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------:
+| 14.04.6 LTS (Trusty Tahr) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 16.04.6 LTS (Xenial Xerus) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 18.04.2 LTS (Bionic Beaver) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 19.04 (Disco Dingo) Latest | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 7 (Wheezy) | Debian | &#10004; | &#63; | &#10004; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 8 (Jessie) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 9 (Stretch) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 10 (Buster) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 6.* | RHEL | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 7.* | RHEL | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 8.* | RHEL | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Fedora 28 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 29 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 30 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 31 | Fedora | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| CentOS 6.* | CentOS | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 7.* | CentOS | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 8.* | CentOS | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| OpenSuSE Leap 15.0 | Open SuSE | &#10004; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| OpenSuSE Leap 15.1 | Open SuSE | &#10004; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| OpenSuSE Tumbleweed | Open SuSE | &#10004; | &#63; | &#63; | N/A | &#10007; | &#10004; | &#63; | &#10004; | &#63;
+| SLES 11 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 12 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 15 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Alpine | Alpine | &#10004; | &#63; | &#10007; | N/A | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Arch Linux (latest) | Arch | &#10004; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| All other linux | Other | &#63; | &#63; | &#63; | &#10007; | &#10007; | &#63; | &#63; | &#10004; | &#63;
+
+## x86 Architecture
+
+| Version | Family | CI: Smoke testing | CI: Testing | CD | .DEB | .RPM | Installer | Kickstart | Kickstart64 | Community
+:------------------: | :------------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------:
+| 14.04.6 LTS (Trusty Tahr) | Ubuntu | &#10004; | &#63; | &#10004; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 16.04.6 LTS (Xenial Xerus) | Ubuntu | &#10004; | &#63; | &#10004; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 18.04.2 LTS (Bionic Beaver) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 19.04 (Disco Dingo) Latest | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 7 (Wheezy) | Debian | &#10004; | &#63; | &#10004; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 8 (Jessie) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 9 (Stretch) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 10 (Buster) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 6.* | RHEL | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 7.* | RHEL | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 8.* | RHEL | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Fedora 28 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 29 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 30 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 31 | Fedora | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| CentOS 6.* | CentOS | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 7.* | CentOS | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 8.* | CentOS | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| OpenSuSE Leap 15.0 | Open SuSE | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| OpenSuSE Leap 15.1 | Open SuSE | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| OpenSuSE Tumbleweed | Open SuSE | &#10004; | &#63; | &#63; | N/A | &#10007; | &#10004; | &#63; | &#10004; | &#63;
+| SLES 11 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 12 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 15 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Alpine | Alpine | &#10004; | &#63; | &#10007; | N/A | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Arch Linux (latest) | Arch | &#10004; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| All other linux | Other | &#63; | &#63; | &#63; | &#10007; | &#10007; | &#63; | &#63; | &#10004; | &#63;
+
+## Supported functionalities across different distribution channels
+
+In the following section we depict which functionalities are available across the different distribution channels.
+There are various limitations and problems we try to address as we evolve and grow. Through this report we want to provide some clarity as to what is available and in what way. Of course we strive to deliver our full solution through all channels, but that may not be feasible yet in some cases.
+
+**Legend**:
+
+- **Auto-detect**: Depends on the program's package dependencies. If the required dependencies are covered at compile time, the capability is enabled
+- **YES**: This flag implies that the functionality is available for that distribution channel
+- **NO**: Not available for that distribution channel at this time, but may be a work-in-progress effort from the Netdata team.
+- **At Runtime**: The given module or functionality is available and only requires configuration after install to enable it
+
+### Core functionality
+
+#### Core
+
+This is the base Netdata capability, which includes basic monitoring, the embedded web server, and so on.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|YES|YES|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: None
+- **What packages required for auto-detect?**: `install-required-packages.sh netdata`
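+
+As a sketch of the typical source-install flow (the installer flags below are the ones used in this repository's Dockerfile, and the dependency script is assumed to live under `packaging/installer/` in this version of the tree):
+
+```bash
+# Install the build dependencies netdata needs (auto-detects your package manager)
+./packaging/installer/install-required-packages.sh netdata
+
+# Build and install netdata from the source tree, non-interactively
+./netdata-installer.sh --dont-wait --dont-start-it
+```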
+
+#### DB Engine
+
+This is the brand new database engine capability of Netdata, a core facility built by default. Given its special needs and dependencies, though, it remains an optional facility so that users can enjoy Netdata even when they cannot cover the dependencies or the hardware requirements.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-dbengine`
+- **What packages required for auto-detect?**: `openssl`, `libuv1`, `lz4`, `Judy`
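+
+For example, on a Debian/Ubuntu system the auto-detect dependencies can be satisfied before building, and the engine selected in `netdata.conf`. The package names below are an assumption for Debian-family repositories (they differ on RPM-based systems), and the `memory mode` option is the documented switch for the metrics store:
+
+```bash
+# Development headers that let the installer auto-detect and enable the DB engine
+sudo apt-get install -y libssl-dev libuv1-dev liblz4-dev libjudy-dev
+```
+
+```
+# netdata.conf — switch the metrics store to the database engine
+[global]
+    memory mode = dbengine
+```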
+
+#### Encryption Support (HTTPS)
+
+This is Netdata's TLS capability that adds encryption to the web server and to the APIs between master and slaves. It is also a core facility of Netdata, but it remains optional for users who are constrained or not interested in tight security.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-https`
+- **What packages required for auto-detect?**: `openssl`
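+
+A minimal sketch of what enabling it looks like at runtime, assuming the `[web]` options `ssl key` and `ssl certificate` documented for Netdata's TLS support (the paths are examples only):
+
+```
+# netdata.conf — point the web server at an existing key/certificate pair
+[web]
+    ssl key = /etc/netdata/ssl/key.pem
+    ssl certificate = /etc/netdata/ssl/cert.pem
+```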
+
+### Libraries/optimizations
+
+#### JSON-C Support
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-jsonc`
+- **What packages required for auto-detect?**: `json-c`
+
+#### Link time optimizations
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-lto`
+- **What packages required for auto-detect?**: No package dependency, depends on GCC version
+
+### External plugins, built with netdata build tools
+
+#### FREEIPMI
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|No|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-freeipmi`
+- **What packages required for auto-detect?**: `freeipmi-dev` (or `freeipmi-devel`)
+
+#### NFACCT
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-nfacct`
+- **What packages required for auto-detect?**: `libmnl-dev`, `libnetfilter_acct-dev`
+
+#### Xenstat
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|NO|NO|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-xenstat`
+- **What packages required for auto-detect?**: `xen-dom0-libs-devel` or `xen-devel`, and `yajl-dev` or `yajl-devel`
+ Note: for CentOS-based systems you will need the `centos-release-xen` repository to get `xen-devel`, as shown in the sketch below
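+
+For instance, on a CentOS 7 host the note above translates to something like the following (a sketch; repository and package names may vary by release):
+
+```bash
+# Enable the Xen repository, then pull in the headers the xenstat plugin needs
+sudo yum install -y centos-release-xen
+sudo yum install -y xen-devel yajl-devel
+```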
+
+#### CUPS
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-cups`
+- **What packages required for auto-detect?**: `cups-devel`
+
+#### FPING
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: `${INSTALL_PATH}/netdata/plugins.d/fping.plugin install`
+- **Flags to disable from source**: None; just don't install it
+- **What packages required for auto-detect?**: None; only `fping` needs to be installed for the plugin to start
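+
+For a source install under `/usr/libexec` this typically looks like the following sketch (the exact plugin path depends on where Netdata was installed, as the `${INSTALL_PATH}` placeholder above indicates; the restart command is whatever your init system uses):
+
+```bash
+# Ask the bundled plugin helper to download, build and install a suitable fping
+sudo /usr/libexec/netdata/plugins.d/fping.plugin install
+
+# Restart netdata so the plugin is picked up
+sudo service netdata restart
+```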
+
+#### IOPING
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: `${INSTALL_PATH}/netdata/plugins.d/ioping.plugin install`
+- **Flags to disable from source**: None; just don't install it
+- **What packages required for auto-detect?**: None; only `ioping` needs to be installed for the plugin to start
+
+#### PERF
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: Inside `netdata.conf`, in the `[plugins]` section, set `perf = yes`
+- **Flags to disable from source**: `--disable-perf`
+- **What packages required for auto-detect?**: None
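+
+In `netdata.conf` that toggle looks like the following sketch (the stock configuration file uses a lowercase `[plugins]` section header):
+
+```
+# netdata.conf — enable the perf plugin explicitly
+[plugins]
+    perf = yes
+```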
+
+### Backends
+
+#### Prometheus remote write
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-backend-prometheus-remote-write`
+- **What packages required for auto-detect?**: `snappy-devel`, `protobuf`, `protobuf-compiler`
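+
+Once built with the feature, enabling it is a configuration step. A minimal sketch, assuming the `[backend]` options (`type`, `destination`, `remote write URL path`) described in the backends documentation; the host, port and path are placeholders:
+
+```
+# netdata.conf — stream metrics to a Prometheus remote write endpoint
+[backend]
+    enabled = yes
+    type = prometheus_remote_write
+    destination = remote-write-host:9201
+    remote write URL path = /receive
+```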
+
+#### AWS Kinesis
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|NO|NO|
+
+- **Flags/instructions to enable**: [Instructions for AWS Kinesis](https://docs.netdata.cloud/backends/aws_kinesis)
+- **Flags to disable from source**: `--disable-backend-kinesis`
+- **What packages required for auto-detect?**: `AWS SDK for C++`, `libcurl`, `libssl`, `libcrypto`
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
new file mode 100644
index 000000000..4be2d93b2
--- /dev/null
+++ b/packaging/docker/Dockerfile
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+# author : paulfantom
+
+# Cross-arch building is achieved by specifying ARCH as a build parameter with the `--build-arg` option.
+# It is automated in the `build.sh` script
+ARG ARCH=amd64
+# This image contains preinstalled dependencies
+FROM netdata/builder:${ARCH} as builder
+
+ENV JUDY_VER 1.0.5
+
+# Copy source
+COPY . /opt/netdata.git
+WORKDIR /opt/netdata.git
+
+# Install from source
+RUN chmod +x netdata-installer.sh && ./netdata-installer.sh --dont-wait --dont-start-it
+
+# Gather the installed files into one directory
+RUN mkdir -p /app/usr/sbin/ \
+ /app/usr/share \
+ /app/usr/libexec \
+ /app/usr/lib \
+ /app/var/cache \
+ /app/var/lib \
+ /app/etc && \
+ mv /usr/share/netdata /app/usr/share/ && \
+ mv /usr/libexec/netdata /app/usr/libexec/ && \
+ mv /usr/lib/netdata /app/usr/lib/ && \
+ mv /var/cache/netdata /app/var/cache/ && \
+ mv /var/lib/netdata /app/var/lib/ && \
+ mv /etc/netdata /app/etc/ && \
+ mv /usr/sbin/netdata /app/usr/sbin/ && \
+ mv /judy-${JUDY_VER} /app/judy-${JUDY_VER} && \
+ mv packaging/docker/run.sh /app/usr/sbin/ && \
+ chmod +x /app/usr/sbin/run.sh
+
+#####################################################################
+ARG ARCH
+# This image contains preinstalled dependencies
+FROM netdata/base:${ARCH}
+
+# Conditional subscription to Polyverse's Polymorphic Linux repositories
+RUN if [ "$(uname -m)" == "x86_64" ]; then \
+ apk update && apk upgrade; \
+ curl https://sh.polyverse.io | sh -s install gcxce5byVQbtRz0iwfGkozZwy support+netdata@polyverse.io; \
+ if [ $? -eq 0 ]; then \
+ apk update && \
+ apk upgrade --available --no-cache && \
+ sed -in 's/^#//g' /etc/apk/repositories; \
+ fi \
+ fi
+
+# Copy files over
+RUN mkdir -p /opt/src
+COPY --from=builder /app /
+
+# Configure system
+ARG NETDATA_UID=201
+ARG NETDATA_GID=201
+ENV DOCKER_GRP netdata
+ENV DOCKER_USR netdata
+RUN \
+ # provide judy installation to base image
+ apk add make alpine-sdk shadow && \
+ cd /judy-${JUDY_VER} && make install && cd / && \
+ # Clean the source stuff once judy is installed
+ rm -rf /judy-${JUDY_VER} && apk del make alpine-sdk && \
+ # fping from alpine apk is on a different location. Moving it.
+ mv /usr/sbin/fping /usr/local/bin/fping && \
+ chmod 4755 /usr/local/bin/fping && \
+ mkdir -p /var/log/netdata && \
+ # Add netdata user
+ addgroup -g ${NETDATA_GID} -S "${DOCKER_GRP}" && \
+ adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G "${DOCKER_GRP}" "${DOCKER_USR}" && \
+ # Apply the permissions as described in
+ # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories
+ chown -R root:netdata /etc/netdata && \
+ chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \
+ chown -R root:netdata /usr/lib/netdata && \
+ chown -R root:netdata /usr/libexec/netdata/ && \
+ chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \
+ chmod 0750 /var/lib/netdata /var/cache/netdata && \
+ # Link log files to stdout
+ ln -sf /dev/stdout /var/log/netdata/access.log && \
+ ln -sf /dev/stdout /var/log/netdata/debug.log && \
+ ln -sf /dev/stderr /var/log/netdata/error.log
+
+ENV NETDATA_PORT 19999
+EXPOSE $NETDATA_PORT
+
+ENTRYPOINT ["/usr/sbin/run.sh"]
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
new file mode 100644
index 000000000..10878f578
--- /dev/null
+++ b/packaging/docker/README.md
@@ -0,0 +1,251 @@
+# Install Netdata with Docker
+
+Running Netdata in a container works best for an internal network or to quickly analyze a host. Docker helps you get set up quickly, and doesn't install anything permanent on the system, which makes uninstalling Netdata easy.
+
+See our full list of Docker images at [Docker Hub](https://hub.docker.com/r/netdata/netdata).
+
+## Limitations running Netdata in Docker
+
+For monitoring the whole host, running Netdata in a container can limit its capabilities.
+Some data, like the host OS performance or status, is not accessible or not as detailed in a container as when running Netdata directly on the host.
+
+A way around this is to provide special mounts to the Docker container so that Netdata can get visibility on host OS information like `/sys` and `/proc` folders or even `/etc/group` and shadow files.
+
+Also, we now ship Docker images using an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, not a COMMAND directive. Please adapt your execution scripts accordingly. You can find more information about ENTRYPOINT vs COMMAND in the [Docker documentation](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
+
+### Package scrambling in runtime (x86_64 only)
+
+Our x86_64 Docker images use [Polymorphic Polyverse Linux package scrambling](https://polyverse.io/how-it-works/). For increased security, you can enable rescrambling of Netdata packages during runtime by setting the environment variable `RESCRAMBLE=true` while starting Netdata with a Docker container.
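+
+For example, to opt in to rescrambling you only need to pass that environment variable when starting the container (same `docker run` options as in the section below):
+
+```bash
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -e RESCRAMBLE=true \
+  netdata/netdata
+```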
+
+## Run Netdata with the docker command
+
+Quickly start Netdata with the `docker` command. Netdata is then available at <http://host:19999>
+
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+The above can be converted to a `docker-compose` file for easier management:
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ hostname: example.com # set to fqdn of host
+ ports:
+ - 19999:19999
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+```
+
+If you don't want to use the apps.plugin functionality, you can remove the mounts of `/etc/passwd` and `/etc/group` (they are used to get proper user and group names for the monitored host) to get slightly better security.
+
+### Docker container names resolution
+
+There are a few options for resolving container names within Netdata. Some methods of doing so will allow root access to your machine from within the container. Please read the following carefully.
+
+#### Docker socket proxy (safest option)
+
+Deploy a Docker socket proxy that accepts and filters out requests using something like [HAProxy](https://docs.netdata.cloud/docs/running-behind-haproxy/) so that it restricts connections to read-only access to the CONTAINERS endpoint.
+
+The reason it's safer to expose the socket to the proxy is because Netdata has a TCP port exposed outside the Docker network. Access to the proxy container is limited to only within the network.
+
+Below is [an example repository (and image)](https://github.com/Tecnativa/docker-socket-proxy) that provides a proxy to the socket.
+
+You run the Docker Socket Proxy in its own Docker Compose file and leave it on a private network that you can add to other services that require access.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ # ... rest of your config ...
+ ports:
+ - 19999:19999
+ environment:
+ - DOCKER_HOST=proxy:2375
+ proxy:
+ image: tecnativa/docker-socket-proxy
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ - CONTAINERS=1
+
+```
+**Note:** Replace `2375` with the port of your proxy.
+
+#### Giving group access to the Docker socket (less safe)
+
+**Important Note**: You should seriously consider the necessity of activating this option,
+as it grants the `netdata` user access to the privileged socket of the Docker service, and therefore to your whole machine.
+
+If you want to have your container names resolved by Netdata, make the `netdata` user be part of the group that owns the socket.
+
+To achieve that, add the environment variable `PGID=[GROUP NUMBER]` to the Netdata container,
+where `[GROUP NUMBER]` is the group ID of the group that owns the Docker socket on your host.
+
+You can find this group number by running the following command (assuming the socket is owned by the `docker` group):
+
+```bash
+grep docker /etc/group | cut -d ':' -f 3
+```
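+
+Putting the two together, a sketch of starting the container with the socket mounted read-only and the group ID passed through:
+
+```bash
+DOCKER_GID=$(grep docker /etc/group | cut -d ':' -f 3)
+
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
+  -e PGID="${DOCKER_GID}" \
+  netdata/netdata
+```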
+
+#### Running as root (unsafe)
+
+**Important Note**: You should seriously consider the necessity of activating this option,
+as it grants the `netdata` user access to the privileged socket of the Docker service, and therefore to your whole machine.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ # ... rest of your config ...
+ volumes:
+ # ... other volumes ...
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ - DOCKER_USR=root
+```
+
+### Pass command line options to Netdata
+
+Since we use an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, you can provide [Netdata daemon command line options](https://docs.netdata.cloud/daemon/#command-line-options) such as the IP address Netdata will be running on, using the [command instruction](https://docs.docker.com/engine/reference/builder/#cmd).
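+
+For example, anything placed after the image name becomes the command and is appended to the entrypoint; assuming the daemon's `-i` option for the listen address, that could look like:
+
+```bash
+# Extra arguments after the image name are passed straight to the netdata daemon
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  netdata/netdata -i 0.0.0.0
+```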
+
+## Install Netdata using Docker Compose with SSL/TLS enabled HTTP Proxy
+
+For a permanent installation on a public server, you should [secure the Netdata instance](../../docs/netdata-security.md). This section contains an example of how to install Netdata with an SSL reverse proxy and basic authentication.
+
+You can use the following `docker-compose.yml` and Caddyfile files to run Netdata with Docker. Replace the domains and email address for [Let's Encrypt](https://letsencrypt.org/) before starting.
+
+### Caddyfile
+
+This file needs to be placed in `/opt` with the name `Caddyfile`. Here you customize your domain, and you need to provide your email address to obtain a Let's Encrypt certificate. Certificate renewal will happen automatically and will be executed internally by the Caddy server.
+
+```caddyfile
+netdata.example.org {
+ proxy / netdata:19999
+ tls admin@example.org
+}
+```
+
+### docker-compose.yml
+
+After setting up the Caddyfile, run `docker-compose up -d` to get a fully functioning Netdata setup behind an SSL/TLS-enabled HTTP reverse proxy.
+
+```yaml
+version: '3'
+volumes:
+ caddy:
+
+services:
+ caddy:
+ image: abiosoft/caddy
+ ports:
+ - 80:80
+ - 443:443
+ volumes:
+ - /opt/Caddyfile:/etc/Caddyfile
+ - caddy:/root/.caddy
+ environment:
+ ACME_AGREE: 'true'
+ netdata:
+ restart: always
+ hostname: netdata.example.org
+ image: netdata/netdata
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+```
+
+### Restrict access with basic auth
+
+You can restrict access by following the [official Caddy basicauth guide](https://caddyserver.com/docs/basicauth) and adding the relevant lines to your Caddyfile.
+
+## Publish a test image to your own repository
+
+At Netdata, we provide multiple ways of testing your Docker images using your own repositories.
+You may either use the command line tools available or take advantage of our Travis CI infrastructure.
+
+### Using tools manually from the command line
+
+The script `packaging/docker/build-test.sh` can be used to create an image and upload it to a repository of your choosing.
+
+```bash
+Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s]
+ -s skip build, just push the image
+Builds an amd64 image and pushes it to the docker hub repository REPOSITORY
+```
+
+This is especially useful when testing a Pull Request for Kubernetes, since you can set `image` to an immutable repository and tag, set the `imagePullPolicy` to `Always` and just keep uploading new images.
+
+Example:
+
+We get a local copy of the Helm chart at <https://github.com/netdata/helmchart>. We modify `values.yaml` to have the following:
+
+```yaml
+image:
+ repository: cakrit/netdata-prs
+ tag: PR5576
+ pullPolicy: Always
+```
+
+We check out PR5576 and run the following:
+
+```bash
+./packaging/docker/build-test.sh -r cakrit/netdata-prs -v PR5576 -u cakrit -p 'XXX'
+```
+
+Then we can run `helm install [path to our helmchart clone]`.
+
+If we make changes to the code, we execute the same `build-test.sh` command, followed by `helm upgrade [name] [path to our helmchart clone]`.
+
+### Inside Netdata organization, using Travis CI
+
+To enable Travis CI integration on your own repositories (Docker and GitHub), you need to be part of the Netdata organization.
+
+Once you have contacted the Netdata owners to set you up on GitHub and Travis, execute the following steps:
+
+- Preparation
+ - Have Netdata forked on your personal GitHub account
+ - Get a GitHub token: go to GitHub **Settings** -> **Developer settings** -> **Personal access tokens**, and generate a new token with full access to `repo_hook`, read-only access to `admin:org`, and `public_repo`, `repo_deployment`, `repo:status` and `user:email` enabled. This will be the GITHUB_TOKEN described later in the instructions, so keep it somewhere safe until it is needed.
+ - Contact the Netdata team and ask for permissions on <https://scan.coverity.com> should you require Travis to be able to push your forked code to Coverity for analysis and reporting. Once you are set up, you should have the email address you used for Coverity and a token from them. These will be your COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN, referred to later.
+ - Have a valid Docker Hub account; the credentials from this account will be the DOCKER_USERNAME and DOCKER_PWD mentioned later
+
+- Setting up Travis CI for your own fork (detailed instructions are provided by the Travis team [here](https://docs.travis-ci.com/user/tutorial/))
+ - Log in to Travis with your own GitHub credentials (OAuth access is available)
+ - Go to your profile settings, under the [repositories](https://travis-ci.com/account/repositories) section, and set up your Netdata fork to be built by Travis
+ - Once the repository has been set up, go to the repository settings within Travis (usually under <https://travis-ci.com/NETDATA_DEVELOPER/netdata/settings>, where "NETDATA_DEVELOPER" is your GitHub handle) and select your desired settings.
+
+- While in the Travis settings for the Netdata repository, in the Environment Variables section, you need to add the following:
+ - DOCKER_USERNAME and DOCKER_PWD variables so that Travis can log in to your Docker Hub account and publish Docker images there.
+ - REPOSITORY variable set to "NETDATA_DEVELOPER/netdata", where NETDATA_DEVELOPER is your GitHub handle again.
+ - GITHUB_TOKEN variable with the token generated in the preparation step, for Travis workflows to function properly.
+ - COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN variables to enable Travis to submit your code for analysis to Coverity.
+
+Having followed these instructions, your forked repository should be all set up for Travis integration. Happy testing!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fdocker%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/docker/build-test.sh b/packaging/docker/build-test.sh
new file mode 100755
index 000000000..3c55e1736
--- /dev/null
+++ b/packaging/docker/build-test.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# Docker build wrapper, for testing manually the docker build process
+# TODO: This script should consume build.sh after setting up required parameters
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Chris Akritidis (chris@netdata.cloud)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+printhelp() {
+ echo "Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s]
+ -s skip build, just push the image
+Builds an amd64 image and pushes it to the docker hub repository REPOSITORY"
+}
+
+set -e
+
+if [ ! -f .gitignore ]; then
+ echo "Run as ./packaging/docker/$(basename "$0") from top level directory of git repository"
+ exit 1
+fi
+
+DOBUILD=1
+while getopts :r:v:u:p:s option
+do
+ case "$option" in
+ r)
+ REPOSITORY=$OPTARG
+ ;;
+ v)
+ VERSION=$OPTARG
+ ;;
+ u)
+ DOCKER_USERNAME=$OPTARG
+ ;;
+ p)
+ DOCKER_PWD=$OPTARG
+ ;;
+ s)
+ DOBUILD=0
+ ;;
+ *)
+ printhelp
+ exit 1
+ ;;
+ esac
+done
+
+if [ -n "${REPOSITORY}" ]; then
+ if [ $DOBUILD -eq 1 ] ; then
+ echo "Building ${VERSION:-latest} of ${REPOSITORY} container"
+ docker run --rm --privileged multiarch/qemu-user-static:register --reset
+
+ # Build images using multi-arch Dockerfile.
+ eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION:-latest}" --file packaging/docker/Dockerfile ./
+
+ # Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
+ mkdir -p /tmp/docker
+ #echo '{"experimental":"enabled"}' > /tmp/docker/config.json
+ fi
+
+ if [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then
+ # Login to docker hub to allow further operations
+ echo "Logging into docker"
+ echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin
+
+ echo "Pushing ${REPOSITORY}:${VERSION}"
+ docker --config /tmp/docker push "${REPOSITORY}:${VERSION}"
+ fi
+else
+ echo "Missing parameter. REPOSITORY=${REPOSITORY}"
+ printhelp
+ exit 1
+fi
diff --git a/packaging/docker/build.sh b/packaging/docker/build.sh
new file mode 100755
index 000000000..456114b31
--- /dev/null
+++ b/packaging/docker/build.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+#
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
+ echo "This mechanism currently can only run on BASH version 4 and above"
+ exit 1
+fi
+
+VERSION="$1"
+declare -A ARCH_MAP
+ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
+DEVEL_ARCHS=(amd64)
+[ "${ARCHS}" ] || ARCHS="${!ARCH_MAP[@]}" # Use default ARCHS unless ARCHS are externally provided
+
+if [ -z ${REPOSITORY} ]; then
+ REPOSITORY="${TRAVIS_REPO_SLUG}"
+ if [ -z ${REPOSITORY} ]; then
+ echo "REPOSITORY not set, build cannot proceed"
+ exit 1
+ else
+ echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}"
+ fi
+fi
+
+# When development mode is set, build on DEVEL_ARCHS
+if [ ! -z ${DEVEL+x} ]; then
+ declare -a ARCHS=(${DEVEL_ARCHS[@]})
+fi
+
+# Ensure there is a version, the most appropriate one
+if [ "${VERSION}" == "" ]; then
+ VERSION=$(git tag --points-at)
+ if [ "${VERSION}" == "" ]; then
+ VERSION="latest"
+ fi
+fi
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
+ echo "Docker build process aborted"
+ exit 1
+fi
+
+echo "Docker image build in progress.."
+echo "Version : ${VERSION}"
+echo "Repository : ${REPOSITORY}"
+echo "Architectures : ${ARCHS[*]}"
+
+docker run --rm --privileged multiarch/qemu-user-static:register --reset
+
+# Build images using multi-arch Dockerfile.
+for ARCH in ${ARCHS[@]}; do
+ TAG="${REPOSITORY}:${VERSION}-${ARCH}"
+ echo "Building tag ${TAG}.."
+ eval docker build --no-cache \
+ --build-arg ARCH="${ARCH}" \
+ --tag "${TAG}" \
+ --file packaging/docker/Dockerfile ./
+ echo "..Done!"
+done
+
+echo "Docker build process completed!"
diff --git a/packaging/docker/check_login.sh b/packaging/docker/check_login.sh
new file mode 100755
index 000000000..7cc8d4e50
--- /dev/null
+++ b/packaging/docker/check_login.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+#
+# This is a credential checker script, to help get early input on docker credentials status
+# If these are wrong, then build/publish has no point running
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
+ echo "This mechanism currently can only run on BASH version 4 and above"
+ exit 1
+fi
+
+DOCKER_CMD="docker "
+
+# There is no reason to continue if we cannot log in to docker hub
+if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then
+ echo "No docker hub username or password found, aborting without publishing"
+ exit 1
+fi
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
+ echo "Docker build process aborted"
+ exit 1
+fi
+
+# Login to docker hub to allow further operations
+echo "Attempting to login to docker"
+echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin
+
+echo "Docker login successful!"
+$DOCKER_CMD logout
+
+echo "Docker login validation completed"
diff --git a/packaging/docker/publish.sh b/packaging/docker/publish.sh
new file mode 100755
index 000000000..e35f063ed
--- /dev/null
+++ b/packaging/docker/publish.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+#
+# Cross-arch docker publish helper script
+# Needs docker in version >18.02 due to usage of manifests
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
+ echo "This mechanism currently can only run on BASH version 4 and above"
+ exit 1
+fi
+
+WORKDIR="$(mktemp -d)" # Temporary folder, removed after script is done
+VERSION="$1"
+declare -A ARCH_MAP
+ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
+DEVEL_ARCHS=(amd64)
+[ "${ARCHS}" ] || ARCHS="${!ARCH_MAP[@]}" # Use default ARCHS unless ARCHS are externally provided
+DOCKER_CMD="docker --config ${WORKDIR}"
+GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
+GIT_USER=${GIT_USER:-"netdatabot"}
+
+if [ -z ${REPOSITORY} ]; then
+ REPOSITORY="${TRAVIS_REPO_SLUG}"
+ if [ -z ${REPOSITORY} ]; then
+ echo "REPOSITORY not set, publish cannot proceed"
+ exit 1
+ else
+ echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}"
+ fi
+fi
+
+# When development mode is set, build on DEVEL_ARCHS
+if [ ! -z ${DEVEL+x} ]; then
+ declare -a ARCHS=(${DEVEL_ARCHS[@]})
+fi
+
+# Ensure there is a version, the most appropriate one
+if [ "${VERSION}" == "" ]; then
+ VERSION=$(git tag --points-at)
+ if [ "${VERSION}" == "" ]; then
+ VERSION="latest"
+ fi
+fi
+MANIFEST_LIST="${REPOSITORY}:${VERSION}"
+
+# There is no reason to continue if we cannot log in to docker hub
+if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then
+ echo "No docker hub username or password found, aborting without publishing"
+ exit 1
+fi
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
+ echo "Docker build process aborted"
+ exit 1
+fi
+
+echo "Docker image publishing in progress.."
+echo "Version : ${VERSION}"
+echo "Repository : ${REPOSITORY}"
+echo "Architectures : ${ARCHS[*]}"
+echo "Manifest list : ${MANIFEST_LIST}"
+
+# Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
+echo '{"experimental":"enabled"}' > "${WORKDIR}"/config.json
+
+# Login to docker hub to allow further operations
+echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin
+
+# Push images to registry
+for ARCH in ${ARCHS[@]}; do
+ TAG="${MANIFEST_LIST}-${ARCH}"
+ echo "Publishing image ${TAG}.."
+ $DOCKER_CMD push "${TAG}" &
+ echo "Image ${TAG} published succesfully!"
+done
+
+echo "Waiting for images publishing to complete"
+wait
+
+# Recreate docker manifest list
+echo "Getting tag list for version '${VERSION}'.."
+TAGS=($(curl -s https://registry.hub.docker.com/v2/repositories/${REPOSITORY}/tags/ | jq -r '.results[]["name"]' | grep "^${VERSION}-"))
+
+echo "Creating manifest list.."
+$DOCKER_CMD manifest create --amend "${MANIFEST_LIST}" "${TAGS[@]/#/${REPOSITORY}:}"
+
+# Annotate manifest with CPU architecture information
+
+echo "Executing manifest annotate.."
+for TAG in "${TAGS[@]}"; do
+ ARCH="${TAG#${VERSION}-}"
+ echo "Annotating manifest for $ARCH, with TAG: ${REPOSITORY}:${TAG} (Manifest list: ${MANIFEST_LIST})"
+ $DOCKER_CMD manifest annotate "${MANIFEST_LIST}" "${REPOSITORY}:${TAG}" --os linux --arch "${ARCH_MAP[$ARCH]}"
+done
+
+# Push manifest to docker hub
+echo "Pushing manifest list to docker.."
+$DOCKER_CMD manifest push -p "${MANIFEST_LIST}"
+
+# Show current manifest (debugging purpose only)
+echo "Evaluating manifest list entry"
+$DOCKER_CMD manifest inspect "${MANIFEST_LIST}"
+
+# Cleanup
+rm -r "${WORKDIR}"
+
+echo "Docker publishing process completed!"
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
new file mode 100755
index 000000000..f4377d458
--- /dev/null
+++ b/packaging/docker/run.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+#
+# Entry point script for netdata
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+set -e
+
+echo "Netdata entrypoint script starting"
+if [ ${RESCRAMBLE+x} ]; then
+ echo "Reinstalling all packages to get the latest Polymorphic Linux scramble"
+ apk upgrade --update-cache --available
+fi
+
+if [ -n "${PGID}" ]; then
+ echo "Creating docker group ${PGID}"
+ addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, it is probably already there"
+ echo "Assigning netdata user to docker group ${PGID}"
+ usermod -a -G "${PGID}" "${DOCKER_USR}" || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
+fi
+
+echo "Netdata entrypoint script completed, starting netdata"
+
+exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_PORT}" "$@"
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
index b860b00f9..3d404e779 100644
--- a/packaging/installer/README.md
+++ b/packaging/installer/README.md
@@ -342,28 +342,31 @@ cd netdata
##### pfSense
-To install Netdata on pfSense run the following commands (within a shell or under Diagnostics/Command Prompt within the pfSense web interface).
+To install Netdata on pfSense, run the following commands (within a shell or under the **Diagnostics/Command** prompt within the pfSense web interface).
-Change platform (i386/amd64, etc) and FreeBSD versions (10/11, etc) according to your environment and change Netdata version (1.10.0 in example) according to latest version present within the FreeSBD repository:-
-
-Note first three packages are downloaded from the pfSense repository for maintaining compatibility with pfSense, Netdata is downloaded from the FreeBSD repository.
+Note that the first four packages are downloaded from the pfSense repository to maintain compatibility with pfSense; Netdata and Python are downloaded from the FreeBSD repository.
```sh
pkg install pkgconf
pkg install bash
pkg install e2fsprogs-libuuid
-pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/python36-3.6.8_2.txz
-pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.13.0.txz
+pkg install libuv
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/python36-3.6.9.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.15.0.txz
```
+**Note:** If you receive a `Not Found` error during the last two commands above, you will either need to manually look in the [repo folder](http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/) for the latest available package and use its URL instead, or you can try manually changing the netdata version in the URL to the latest version.
+
+You must edit `/usr/local/etc/netdata/netdata.conf` and change `bind to = 127.0.0.1` to `bind to = 0.0.0.0`.
+
+To start Netdata manually, run `service netdata onestart`
-To start Netdata manually run `service netdata onestart`
+Visit the Netdata dashboard to confirm it's working: `http://<pfsenseIP>:19999`
-To start Netdata automatically at each boot add `service netdata onestart` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**).
-Shellcmd Type should be set to `Shellcmd`.
+To start Netdata automatically every boot, add `service netdata onestart` as a Shellcmd entry within the pfSense web interface under **Services/Shellcmd**. You'll need to install the Shellcmd package beforehand under **System/Package Manager/Available Packages**. The Shellcmd Type should be set to `Shellcmd`.
![](https://i.imgur.com/wcKiPe1.png)
Alternatively more information can be found in <https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages>, for achieving the same via the command line and scripts.
-If you experience an issue with `/usr/bin/install` absense on pfSense 2.3 or earlier, update pfSense or use workaround from <https://redmine.pfsense.org/issues/6643>
+If you experience an issue with `/usr/bin/install` being absent in pfSense 2.3 or earlier, update pfSense or use a workaround from <https://redmine.pfsense.org/issues/6643>
**Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata`
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
new file mode 100644
index 000000000..d82c1b318
--- /dev/null
+++ b/packaging/maintainers/README.md
@@ -0,0 +1,80 @@
+# Package Maintainers
+
+This page tracks the package maintainers for Netdata, for various operating systems and versions.
+
+> Feel free to update it, so that it reflects the current status.
+
+---
+
+## Official Linux Distributions
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) |
+| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) |
+| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) |
+| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) |
+| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) |
+| Ubuntu | | | |
+| Red Hat / Fedora / Centos | | | |
+| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) |
+
+---
+
+## FreeBSD
+
+| System | Initial PR | Core Developer | Package Maintainer
+|:-:|:-:|:-:|:-:|
+| FreeBSD | #1321 | @vlvkobal|@mmokhi
+
+---
+
+## MacOS
+
+| System | URL | Core Developer | Package Maintainer
+|:-:|:-:|:-:|:-:|
+| MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
+
+---
+
+## Unofficial Linux Packages
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 |
+---
+
+## Embedded Linux
+
+| Embedded Linux | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 |
+| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) |
+| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata |
+| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 |
+| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi |
+
+---
+
+## Linux Containers
+
+| Containers | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Docker | Git | @titpetric | https://github.com/titpetric/netdata |
+
+---
+
+## Automation Systems
+
+| Automation Systems | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ |
+| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook |
+
+---
+
+## Packages summary from repology.org
+
+[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fmaintainers%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md
new file mode 100644
index 000000000..b1dab7e0c
--- /dev/null
+++ b/packaging/makeself/README.md
@@ -0,0 +1,48 @@
+# Netdata static binary build
+
+To build the static binary 64-bit distribution package, run:
+
+```bash
+cd /path/to/netdata.git
+./packaging/makeself/build-x86_64-static.sh
+```
+
+The program will:
+
+1. set up a new Docker container with Alpine Linux
+2. install the required Alpine packages (the build environment, needed libraries, etc.)
+3. download and compile the third-party apps that are packaged with Netdata (`bash`, `curl`, etc.)
+4. compile Netdata
+
+Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the Netdata binary package that can be run to install Netdata on any other computer.
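+
+The resulting `.run` file is a self-extracting shell archive, so installing it on a target machine is a single command (the filename below is the same placeholder pattern as above; use the one produced by your build, and expect the files to land under `/opt/netdata`):
+
+```bash
+# Run the self-extracting package as root to install the static netdata build
+sudo sh ./netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run
+```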
+
+---
+
+## building binaries with debug info
+
+To build Netdata binaries with debugging / tracing information in them, use:
+
+```bash
+cd /path/to/netdata.git
+./packaging/makeself/build-x86_64-static.sh debug
+```
+
+These binaries are not optimized (they are a bit slower), they have certain features disabled (like log flood protection), other features enabled (like `debug flags`), and they are not stripped (the binary files are bigger, since they now include source code tracing information).
+
+### debugging Netdata binaries
+
+Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start Netdata:
+
+```bash
+PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D
+```
+
+The above command will run Netdata under `valgrind`. While Netdata runs under `valgrind` it will be 10x slower and use a lot more memory.
+
+If Netdata crashes, `valgrind` will print a stack trace of the issue. Open a GitHub issue to let us know.
+
+To stop Netdata while it runs under `valgrind`, press Control-C on the console.
+
+> If you omit the parameter `--undef-value-errors=no` to valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal. Valgrind has heuristics to prevent it from printing such errors for system libraries, but for the static Netdata binary, all the required libraries are built into Netdata. So, valgrind cannot apply its heuristics and prints them.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fmakeself%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh
new file mode 100755
index 000000000..69ddf2bf5
--- /dev/null
+++ b/packaging/makeself/build-x86_64-static.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "$0")/../installer/functions.sh || exit 1
+
+set -e
+
+DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine37"
+
+if ! sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1
+then
+ # To run interactively:
+ # sudo docker run -it netdata-package-x86_64-static /bin/sh
+ # (add -v host-dir:guest-dir:rw arguments to mount volumes)
+ #
+ # To remove images in order to re-create:
+ # sudo docker rm -v $(sudo docker ps -a -q -f status=exited)
+ # sudo docker rmi netdata-package-x86_64-static
+ #
+ # This command maps the current directory to
+ # /usr/src/netdata.git
+ # inside the container and runs the script install-alpine-packages.sh
+ # (also inside the container)
+ #
+ run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.7 \
+ /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh
+
+ # save the changes made permanently
+ id=$(sudo docker ps -l -q)
+ run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}"
+fi
+
+# Run the build script inside the container
+run sudo docker run -a stdin -a stdout -a stderr -i -t -v \
+ $(pwd):/usr/src/netdata.git:rw \
+ "${DOCKER_CONTAINER_NAME}" \
+ /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}"
+
+if [ "${USER}" ]
+ then
+ sudo chown -R "${USER}" .
+fi
diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh
new file mode 100755
index 000000000..e5804c523
--- /dev/null
+++ b/packaging/makeself/build.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+# parse command line arguments
+
+export NETDATA_BUILD_WITH_DEBUG=0
+
+while [ ! -z "${1}" ]
+do
+ case "${1}" in
+ debug)
+ export NETDATA_BUILD_WITH_DEBUG=1
+ ;;
+
+ *)
+ ;;
+ esac
+
+ shift
+done
+
+
+# -----------------------------------------------------------------------------
+
+# First run install-alpine-packages.sh under alpine linux to install
+# the required packages. build-x86_64-static.sh will do this for you
+# using docker.
+
+cd $(dirname "$0") || exit 1
+
+# if we don't run inside the netdata repo
+# download it and run from it
+if [ ! -f ../../netdata-installer.sh ]
+then
+ git clone https://github.com/netdata/netdata.git netdata.git || exit 1
+ cd netdata.git/makeself || exit 1
+ ./build.sh "$@"
+ exit $?
+fi
+
+cat >&2 <<EOF
+
+This program will create a self-extracting shell package containing
+a statically linked netdata, able to run on any 64-bit Linux system,
+without any dependencies from the target system.
+
+It can be used to have netdata running in no time, or in cases where
+the target Linux system cannot compile netdata.
+
+EOF
+
+# read -p "Press ENTER to continue > "
+
+if [ ! -d tmp ]
+ then
+ mkdir tmp || exit 1
+fi
+
+./run-all-jobs.sh "$@"
+exit $?
diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh
new file mode 100755
index 000000000..a0b72223d
--- /dev/null
+++ b/packaging/makeself/functions.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+
+# allow running the jobs by hand
+[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0
+[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.."
+[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+export NULL=
+
+# make sure the path does not end with /
+if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ]
+ then
+ export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}"
+fi
+
+# find the parent directory
+export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")"
+
+# -----------------------------------------------------------------------------
+
+# bash strict mode
+set -euo pipefail
+
+# -----------------------------------------------------------------------------
+
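+# fetch <dir> <url>: download <url> as tmp/<dir>.tar.gz (unless it is already there), extract it under tmp/ and cd into tmp/<dir>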
+fetch() {
+ local dir="${1}" url="${2}"
+ local tar="${dir}.tar.gz"
+
+ if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]
+ then
+ run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}"
+ fi
+
+ if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]
+ then
+ cd "${NETDATA_MAKESELF_PATH}/tmp"
+ run tar -zxpf "${tar}"
+ cd -
+ fi
+
+ run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
+}
+
+# -----------------------------------------------------------------------------
+
+# load the functions of the netdata-installer.sh
+. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh"
+
+# -----------------------------------------------------------------------------
+
+# debug
+echo "ME=${0}"
+echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}"
+echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}"
+echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}"
+echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}"
+echo "PROCESSORS=$(find_processors)"
diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh
new file mode 100755
index 000000000..bcb971f8f
--- /dev/null
+++ b/packaging/makeself/install-alpine-packages.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env sh
+#
+# Installation script for the alpine host
+# to prepare the static binary
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
+
+# Update the package index
+apk update
+
+# Add required APK packages
+apk add --no-cache \
+ bash \
+ wget \
+ curl \
+ ncurses \
+ git \
+ netcat-openbsd \
+ alpine-sdk \
+ autoconf \
+ automake \
+ gcc \
+ make \
+ libtool \
+ pkgconfig \
+ util-linux-dev \
+ openssl-dev \
+ gnutls-dev \
+ zlib-dev \
+ libmnl-dev \
+ libnetfilter_acct-dev \
+ libuv-dev \
+ lz4-dev \
+ openssl-dev \
+ || exit 1
+
+# Judy doesn't seem to be available in the repositories, so download and install it manually
+export JUDY_VER="1.0.5"
+wget -O /judy.tar.gz http://downloads.sourceforge.net/project/judy/judy/Judy-${JUDY_VER}/Judy-${JUDY_VER}.tar.gz
+cd /
+tar -xf judy.tar.gz
+rm judy.tar.gz
+cd /judy-${JUDY_VER}
+CFLAGS="-O2 -s" CXXFLAGS="-O2 -s" ./configure
+make
+make install;
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
new file mode 100755
index 000000000..9796eb085
--- /dev/null
+++ b/packaging/makeself/install-or-update.sh
@@ -0,0 +1,243 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/functions.sh
+
+export LC_ALL=C
+umask 002
+
+# Be nice on production environments
+renice 19 $$ >/dev/null 2>/dev/null
+
+# -----------------------------------------------------------------------------
+if [ -d /opt/netdata/etc/netdata.old ]; then
+ progress "Found old etc/netdata directory, reinstating this"
+ [ -d /opt/netdata/etc/netdata.new ] && rm -rf /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata.old /opt/netdata/etc/netdata
+
+ progress "Trigger stock config clean up"
+ rm -f /opt/netdata/etc/netdata/.installer-cleanup-of-stock-configs-done
+fi
+
+STARTIT=1
+
+while [ ! -z "${1}" ]
+do
+ if [ "${1}" = "--dont-start-it" ]
+ then
+ STARTIT=0
+ else
+ echo >&2 "Unknown option '${1}'. Ignoring it."
+ fi
+ shift
+done
+
+deleted_stock_configs=0
+if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ]
+then
+
+ # -----------------------------------------------------------------------------
+ progress "Deleting stock configuration files from user configuration directory"
+
+ declare -A configs_signatures=()
+ source "system/configs.signatures"
+
+ if [ ! -d etc/netdata ]
+ then
+ run mkdir -p etc/netdata
+ fi
+
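+  # delete any file under etc/ whose checksum matches a known stock configuration, i.e. a file the user never modified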
+ md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)"
+ for x in $(find etc -type f)
+ do
+    # find its relative filename
+ f="${x/etc\/netdata\//}"
+
+ # find the stock filename
+ t="${f/.conf.old/.conf}"
+ t="${t/.conf.orig/.conf}"
+
+ if [ ! -z "${md5sum}" ]
+ then
+ # find the checksum of the existing file
+ md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)"
+ #echo >&2 "md5: ${md5}"
+
+ # check if it matches
+ if [ "${configs_signatures[${md5}]}" = "${t}" ]
+ then
+ # it matches the default
+ run rm -f "${x}"
+ deleted_stock_configs=$(( deleted_stock_configs + 1 ))
+ fi
+ fi
+ done
+
+ touch "etc/netdata/.installer-cleanup-of-stock-configs-done"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Attempt to create user/group netdata/netadata"
+
+NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody"
+NETDATA_ADDED_TO_GROUPS=""
+# Default user/group
+NETDATA_USER="root"
+NETDATA_GROUP="root"
+
+if portable_add_group netdata; then
+ if portable_add_user netdata "/opt/netdata"; then
+ progress "Add user netdata to required user groups"
+ for g in ${NETDATA_WANTED_GROUPS}; do
+ # shellcheck disable=SC2086
+ portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}" || run_failed "Failed to add netdata user to secondary groups"
+ done
+ NETDATA_USER="netdata"
+ NETDATA_GROUP="netdata"
+ else
+ run_failed "I could not add user netdata, will be using root"
+ fi
+else
+  run_failed "I could not add group netdata, so user netdata will not be created either. Netdata will run as root:root"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Check SSL certificates paths"
+
+if [ ! -f "/etc/ssl/certs/ca-certificates.crt" ]
+then
+ if [ ! -f /opt/netdata/.curlrc ]
+ then
+ cacert=
+
+ # CentOS
+ [ -f "/etc/ssl/certs/ca-bundle.crt" ] && cacert="/etc/ssl/certs/ca-bundle.crt"
+
+ if [ ! -z "${cacert}" ]
+ then
+ echo "Creating /opt/netdata/.curlrc with cacert=${cacert}"
+ echo >/opt/netdata/.curlrc "cacert=${cacert}"
+ else
+ run_failed "Failed to find /etc/ssl/certs/ca-certificates.crt"
+ fi
+ fi
+fi
+
+
+# -----------------------------------------------------------------------------
+progress "Install logrotate configuration for netdata"
+
+install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
+
+
+# -----------------------------------------------------------------------------
+progress "Install netdata at system init"
+
+install_netdata_service || run_failed "Cannot install netdata init service."
+
+
+# -----------------------------------------------------------------------------
+progress "creating quick links"
+
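+# dir_should_be_link <dir> <target> <link>: inside <dir>, replace <link> with a symlink to <target>, backing up any real file or directory found there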
+dir_should_be_link() {
+ local p="${1}" t="${2}" d="${3}" old
+
+ old="${PWD}"
+ cd "${p}" || return 0
+
+ if [ -e "${d}" ]
+ then
+ if [ -h "${d}" ]
+ then
+ run rm "${d}"
+ else
+ run mv -f "${d}" "${d}.old.$$"
+ fi
+ fi
+
+ run ln -s "${t}" "${d}"
+ cd "${old}"
+}
+
+dir_should_be_link . bin sbin
+dir_should_be_link usr ../bin bin
+dir_should_be_link usr ../bin sbin
+dir_should_be_link usr . local
+
+dir_should_be_link . etc/netdata netdata-configs
+dir_should_be_link . usr/share/netdata/web netdata-web-files
+dir_should_be_link . usr/libexec/netdata netdata-plugins
+dir_should_be_link . var/lib/netdata netdata-dbs
+dir_should_be_link . var/cache/netdata netdata-metrics
+dir_should_be_link . var/log/netdata netdata-logs
+
+dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig
+
+if [ ${deleted_stock_configs} -gt 0 ]
+then
+ dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
+fi
+
+
+# -----------------------------------------------------------------------------
+
+progress "create user config directories"
+
+for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" "custom-plugins.d" "ssl"
+do
+ if [ ! -d "etc/netdata/${x}" ]
+ then
+ run mkdir -p "etc/netdata/${x}" || exit 1
+ fi
+done
+
+
+# -----------------------------------------------------------------------------
+progress "fix permissions"
+
+run chmod g+rx,o+rx /opt
+run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata
+
+
+# -----------------------------------------------------------------------------
+
+progress "fix plugin permissions"
+
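+# these plugins need elevated privileges: make them setuid root, executable only by root and the netdata group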
+for x in apps.plugin freeipmi.plugin ioping cgroup-network
+do
+ f="usr/libexec/netdata/plugins.d/${x}"
+
+ if [ -f "${f}" ]
+ then
+ run chown root:${NETDATA_GROUP} "${f}"
+ run chmod 4750 "${f}"
+ fi
+done
+
+# fix the fping binary
+if [ -f bin/fping ]
+then
+ run chown root:${NETDATA_GROUP} bin/fping
+ run chmod 4750 bin/fping
+fi
+
+
+# -----------------------------------------------------------------------------
+
+if [ ${STARTIT} -eq 0 ]; then
+ create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf"
+ netdata_banner "is installed now!"
+else
+ progress "starting netdata"
+
+ if ! restart_netdata "/opt/netdata/bin/netdata"; then
+ create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf"
+ netdata_banner "is installed and running now!"
+ else
+ create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf"
+ netdata_banner "is installed now!"
+ fi
+fi
+run chown "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf"
+run chmod 0664 "/opt/netdata/etc/netdata/netdata.conf"
diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh
new file mode 100755
index 000000000..06dc82f29
--- /dev/null
+++ b/packaging/makeself/jobs/10-prepare-destination.install.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old"
+[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old"
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin"
+run mkdir -p "${NETDATA_INSTALL_PATH}/usr"
+run cd "${NETDATA_INSTALL_PATH}"
+run ln -s bin sbin
+run cd "${NETDATA_INSTALL_PATH}/usr"
+run ln -s ../bin bin
+run ln -s ../sbin sbin
+run ln -s . local
diff --git a/packaging/makeself/jobs/50-bash-4.4.18.install.sh b/packaging/makeself/jobs/50-bash-4.4.18.install.sh
new file mode 100755
index 000000000..a762d37ae
--- /dev/null
+++ b/packaging/makeself/jobs/50-bash-4.4.18.install.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz"
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --without-bash-malloc \
+ --enable-static-link \
+ --enable-net-redirections \
+ --enable-array-variables \
+ --disable-profiling \
+ --disable-nls \
+# --disable-rpath \
+# --enable-alias \
+# --enable-arith-for-command \
+# --enable-array-variables \
+# --enable-brace-expansion \
+# --enable-casemod-attributes \
+# --enable-casemod-expansions \
+# --enable-command-timing \
+# --enable-cond-command \
+# --enable-cond-regexp \
+# --enable-directory-stack \
+# --enable-dparen-arithmetic \
+# --enable-function-import \
+# --enable-glob-asciiranges-default \
+# --enable-help-builtin \
+# --enable-job-control \
+# --enable-net-redirections \
+# --enable-process-substitution \
+# --enable-progcomp \
+# --enable-prompt-string-decoding \
+# --enable-readline \
+# --enable-select \
+
+
+run make clean
+run make -j$(find_processors)
+
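+# replace the loadable builtins examples Makefile with a no-op one, so "make install" does not build or install them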
+cat >examples/loadables/Makefile <<EOF
+all:
+clean:
+install:
+EOF
+
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/bash
+fi
diff --git a/packaging/makeself/jobs/50-curl-7.60.0.install.sh b/packaging/makeself/jobs/50-curl-7.60.0.install.sh
new file mode 100755
index 000000000..c91598251
--- /dev/null
+++ b/packaging/makeself/jobs/50-curl-7.60.0.install.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "curl-curl-7_60_0" "https://github.com/curl/curl/archive/curl-7_60_0.tar.gz"
+
+export LDFLAGS="-static"
+export PKG_CONFIG="pkg-config --static"
+
+run ./buildconf
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --enable-optimize \
+ --disable-shared \
+ --enable-static \
+ --enable-http \
+ --enable-proxy \
+ --enable-ipv6 \
+ --enable-cookies \
+ ${NULL}
+
+# Curl autoconf does not honour the curl_LDFLAGS environment variable
+run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile
+
+run make clean
+run make -j$(find_processors)
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/curl
+fi
diff --git a/packaging/makeself/jobs/50-fping-4.2.install.sh b/packaging/makeself/jobs/50-fping-4.2.install.sh
new file mode 100755
index 000000000..a137753d8
--- /dev/null
+++ b/packaging/makeself/jobs/50-fping-4.2.install.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "fping-4.2" "https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz"
+
+export CFLAGS="-static"
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --enable-ipv4 \
+ --enable-ipv6 \
+ ${NULL}
+
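+# replace doc/Makefile with a no-op one, so "make install" does not try to build the documentation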
+cat >doc/Makefile <<EOF
+all:
+clean:
+install:
+EOF
+
+run make clean
+run make -j$(find_processors)
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/fping
+fi
diff --git a/packaging/makeself/jobs/50-ioping-1.1.install.sh b/packaging/makeself/jobs/50-ioping-1.1.install.sh
new file mode 100755
index 000000000..83c778c15
--- /dev/null
+++ b/packaging/makeself/jobs/50-ioping-1.1.install.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "netdata-ioping-43d15a5" "https://github.com/netdata/ioping/tarball/master"
+
+export CFLAGS="-static"
+
+run make clean
+run make -j$(find_processors)
+run mkdir -p ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/
+run install -o root -g root -m 4750 ioping ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/ioping
+fi
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
new file mode 100755
index 000000000..80fba3158
--- /dev/null
+++ b/packaging/makeself/jobs/70-netdata-git.install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. ${NETDATA_MAKESELF_PATH}/functions.sh "${@}" || exit 1
+
+cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ export CFLAGS="-static -O3"
+else
+ export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1"
+# export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness"
+fi
+
+# We export this as 'yes' and the installer saves it into .environment.
+# The updater consumes it, so that it can tell whether it should update a static install or a non-static one.
+export IS_NETDATA_STATIC_BINARY="yes"
+
+run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \
+ --dont-wait \
+ --dont-start-it \
+ ${NULL}
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/netdata
+ run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin
+ run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/cgroup-network
+fi
diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh
new file mode 100755
index 000000000..f3056e6ac
--- /dev/null
+++ b/packaging/makeself/jobs/99-makeself.install.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+run cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+# -----------------------------------------------------------------------------
+# find the netdata version
+
+VERSION="$(git describe 2>/dev/null)"
+if [ -z "${VERSION}" ]; then
+ VERSION=$(cat packaging/version)
+fi
+
+if [ "${VERSION}" == "" ]; then
+  echo >&2 "Cannot find the version number. Create the makeself package from source code with the git tree structure available."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# copy the files needed by makeself installation
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/system"
+
+run cp \
+ packaging/makeself/post-installer.sh \
+ packaging/makeself/install-or-update.sh \
+ packaging/installer/functions.sh \
+ configs.signatures \
+ system/netdata-init-d \
+ system/netdata-lsb \
+ system/netdata-openrc \
+ system/netdata.logrotate \
+ system/netdata.service \
+ "${NETDATA_INSTALL_PATH}/system/"
+
+
+# -----------------------------------------------------------------------------
+# create a wrapper to start our netdata with a modified path
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv"
+
+run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \
+ "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1
+
+cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <<EOF
+#!${NETDATA_INSTALL_PATH}/bin/bash
+export NETDATA_BASH_LOADABLES="DISABLE"
+export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}"
+exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}"
+EOF
+run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata"
+
+
+# -----------------------------------------------------------------------------
+# remove the links to allow untarring the archive
+
+run rm "${NETDATA_INSTALL_PATH}/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/bin" \
+ "${NETDATA_INSTALL_PATH}/usr/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/local"
+
+
+# -----------------------------------------------------------------------------
+# create the makeself archive
+
+run sed "s|NETDATA_VERSION|${VERSION}|g" <"${NETDATA_MAKESELF_PATH}/makeself.lsm" >"${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+run "${NETDATA_MAKESELF_PATH}/makeself.sh" \
+ --gzip \
+ --complevel 9 \
+ --notemp \
+ --needroot \
+ --target "${NETDATA_INSTALL_PATH}" \
+ --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \
+ --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \
+ --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \
+ --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \
+ "${NETDATA_INSTALL_PATH}" \
+ "${NETDATA_INSTALL_PATH}.gz.run" \
+ "netdata, the real-time performance and health monitoring system" \
+ ./system/post-installer.sh \
+ ${NULL}
+
+run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+# -----------------------------------------------------------------------------
+# copy it to the netdata build dir
+
+FILE="netdata-${VERSION}.gz.run"
+
+run mkdir -p artifacts
+run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}"
+
+[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run
+run ln -s "artifacts/${FILE}" netdata-latest.gz.run
+
+echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'"
diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh
new file mode 100755
index 000000000..43ddd7746
--- /dev/null
+++ b/packaging/makeself/makeself-header.sh
@@ -0,0 +1,557 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+cat << EOF > "$archname"
+#!/bin/sh
+# This script was generated using Makeself $MS_VERSION
+
+ORIG_UMASK=\`umask\`
+if test "$KEEP_UMASK" = n; then
+ umask 077
+fi
+
+CRCsum="$CRCsum"
+MD5="$MD5sum"
+TMPROOT=\${TMPDIR:=/tmp}
+USER_PWD="\$PWD"; export USER_PWD
+
+label="$LABEL"
+script="$SCRIPT"
+scriptargs="$SCRIPTARGS"
+licensetxt="$LICENSE"
+helpheader='$HELPHEADER'
+targetdir="$archdirname"
+filesizes="$filesizes"
+keep="$KEEP"
+nooverwrite="$NOOVERWRITE"
+quiet="n"
+accept="n"
+nodiskspace="n"
+export_conf="$EXPORT_CONF"
+
+print_cmd_arg=""
+if type printf > /dev/null; then
+ print_cmd="printf"
+elif test -x /usr/ucb/echo; then
+ print_cmd="/usr/ucb/echo"
+else
+ print_cmd="echo"
+fi
+
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:\$PATH
+ export PATH
+fi
+
+unset CDPATH
+
+MS_Printf()
+{
+ \$print_cmd \$print_cmd_arg "\$1"
+}
+
+MS_PrintLicense()
+{
+ if test x"\$licensetxt" != x; then
+ echo "\$licensetxt"
+ if test x"\$accept" != xy; then
+ while true
+ do
+ MS_Printf "Please type y to accept, n otherwise: "
+ read yn
+ if test x"\$yn" = xn; then
+ keep=n
+ eval \$finish; exit 1
+ break;
+ elif test x"\$yn" = xy; then
+ break;
+ fi
+ done
+ fi
+ fi
+}
+
+MS_diskspace()
+{
+ (
+ df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }'
+ )
+}
+
+MS_dd()
+{
+ blocks=\`expr \$3 / 1024\`
+ bytes=\`expr \$3 % 1024\`
+ dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\
+ { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\
+ test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null
+}
+
+MS_dd_Progress()
+{
+ if test x"\$noprogress" = xy; then
+ MS_dd \$@
+ return \$?
+ fi
+ file="\$1"
+ offset=\$2
+ length=\$3
+ pos=0
+ bsize=4194304
+ while test \$bsize -gt \$length; do
+ bsize=\`expr \$bsize / 4\`
+ done
+ blocks=\`expr \$length / \$bsize\`
+ bytes=\`expr \$length % \$bsize\`
+ (
+ dd ibs=\$offset skip=1 2>/dev/null
+ pos=\`expr \$pos \+ \$bsize\`
+ MS_Printf " 0%% " 1>&2
+ if test \$blocks -gt 0; then
+ while test \$pos -le \$length; do
+ dd bs=\$bsize count=1 2>/dev/null
+ pcent=\`expr \$length / 100\`
+ pcent=\`expr \$pos / \$pcent\`
+ if test \$pcent -lt 100; then
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ if test \$pcent -lt 10; then
+ MS_Printf " \$pcent%% " 1>&2
+ else
+ MS_Printf " \$pcent%% " 1>&2
+ fi
+ fi
+ pos=\`expr \$pos \+ \$bsize\`
+ done
+ fi
+ if test \$bytes -gt 0; then
+ dd bs=\$bytes count=1 2>/dev/null
+ fi
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ MS_Printf " 100%% " 1>&2
+ ) < "\$file"
+}
+
+MS_Help()
+{
+ cat << EOH >&2
+\${helpheader}Makeself version $MS_VERSION
+ 1) Getting help or info about \$0 :
+ \$0 --help Print this message
+ \$0 --info Print embedded info : title, default target directory, embedded script ...
+ \$0 --lsm Print embedded lsm entry (or no LSM)
+ \$0 --list Print the list of files in the archive
+ \$0 --check Checks integrity of the archive
+
+ 2) Running \$0 :
+ \$0 [options] [--] [additional arguments to embedded script]
+ with following options (in that order)
+ --confirm Ask before running embedded script
+ --quiet Do not print anything except error messages
+ --accept Accept the license
+ --noexec Do not run embedded script
+ --keep Do not erase target directory after running
+ the embedded script
+ --noprogress Do not show the progress during the decompression
+ --nox11 Do not spawn an xterm
+ --nochown Do not give the extracted files to the current user
+ --nodiskspace Do not check for available disk space
+ --target dir Extract directly to a target directory
+ directory path can be either absolute or relative
+ --tar arg1 [arg2 ...] Access the contents of the archive through the tar command
+ -- Following arguments will be passed to the embedded script
+EOH
+}
+
+MS_Check()
+{
+ OLD_PATH="\$PATH"
+ PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\`
+ PATH="\$OLD_PATH"
+
+ if test x"\$quiet" = xn; then
+ MS_Printf "Verifying archive integrity..."
+ fi
+ offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\`
+ verb=\$2
+ i=1
+ for s in \$filesizes
+ do
+ crc=\`echo \$CRCsum | cut -d" " -f\$i\`
+ if test -x "\$MD5_PATH"; then
+ if test x"\`basename \$MD5_PATH\`" = xdigest; then
+ MD5_ARG="-a md5"
+ fi
+ md5=\`echo \$MD5 | cut -d" " -f\$i\`
+ if test x"\$md5" = x00000000000000000000000000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2
+ else
+ md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`;
+ if test x"\$md5sum" != x"\$md5"; then
+ echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2
+ exit 2
+ else
+ test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2
+ fi
+ crc="0000000000"; verb=n
+ fi
+ fi
+ if test x"\$crc" = x0000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2
+ else
+ sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\`
+ if test x"\$sum1" = x"\$crc"; then
+ test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2
+ else
+ echo "Error in checksums: \$sum1 is different from \$crc" >&2
+ exit 2;
+ fi
+ fi
+ i=\`expr \$i + 1\`
+ offset=\`expr \$offset + \$s\`
+ done
+ if test x"\$quiet" = xn; then
+ echo " All good."
+ fi
+}
+
+UnTAR()
+{
+ if test x"\$quiet" = xn; then
+ tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; }
+ else
+ tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; }
+ fi
+}
+
+finish=true
+xterm_loop=
+noprogress=$NOPROGRESS
+nox11=$NOX11
+copy=$COPY
+ownership=y
+verbose=n
+
+initargs="\$@"
+
+while true
+do
+ case "\$1" in
+ -h | --help)
+ MS_Help
+ exit 0
+ ;;
+ -q | --quiet)
+ quiet=y
+ noprogress=y
+ shift
+ ;;
+ --accept)
+ accept=y
+ shift
+ ;;
+ --info)
+ echo Identification: "\$label"
+ echo Target directory: "\$targetdir"
+ echo Uncompressed size: $USIZE KB
+ echo Compression: $COMPRESS
+ echo Date of packaging: $DATE
+ echo Built with Makeself version $MS_VERSION on $OSTYPE
+ echo Build command was: "$MS_COMMAND"
+ if test x"\$script" != x; then
+ echo Script run after extraction:
+ echo " " \$script \$scriptargs
+ fi
+ if test x"$copy" = xcopy; then
+ echo "Archive will copy itself to a temporary location"
+ fi
+ if test x"$NEED_ROOT" = xy; then
+ echo "Root permissions required for extraction"
+ fi
+ if test x"$KEEP" = xy; then
+ echo "directory \$targetdir is permanent"
+ else
+ echo "\$targetdir will be removed after extraction"
+ fi
+ exit 0
+ ;;
+ --dumpconf)
+ echo LABEL=\"\$label\"
+ echo SCRIPT=\"\$script\"
+ echo SCRIPTARGS=\"\$scriptargs\"
+ echo archdirname=\"$archdirname\"
+ echo KEEP=$KEEP
+ echo NOOVERWRITE=$NOOVERWRITE
+ echo COMPRESS=$COMPRESS
+ echo filesizes=\"\$filesizes\"
+ echo CRCsum=\"\$CRCsum\"
+ echo MD5sum=\"\$MD5\"
+ echo OLDUSIZE=$USIZE
+ echo OLDSKIP=`expr $SKIP + 1`
+ exit 0
+ ;;
+ --lsm)
+cat << EOLSM
+EOF
+eval "$LSM_CMD"
+cat << EOF >> "$archname"
+EOLSM
+ exit 0
+ ;;
+ --list)
+ echo Target directory: \$targetdir
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --tar)
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ arg1="\$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@"
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --check)
+ MS_Check "\$0" y
+ exit 0
+ ;;
+ --confirm)
+ verbose=y
+ shift
+ ;;
+ --noexec)
+ script=""
+ shift
+ ;;
+ --keep)
+ keep=y
+ shift
+ ;;
+ --target)
+ keep=y
+ targetdir=\${2:-.}
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --noprogress)
+ noprogress=y
+ shift
+ ;;
+ --nox11)
+ nox11=y
+ shift
+ ;;
+ --nochown)
+ ownership=n
+ shift
+ ;;
+ --nodiskspace)
+ nodiskspace=y
+ shift
+ ;;
+ --xwin)
+ if test "$NOWAIT" = n; then
+ finish="echo Press Return to close this window...; read junk"
+ fi
+ xterm_loop=1
+ shift
+ ;;
+ --phase2)
+ copy=phase2
+ shift
+ ;;
+ --)
+ shift
+ break ;;
+ -*)
+ echo Unrecognized flag : "\$1" >&2
+ MS_Help
+ exit 1
+ ;;
+ *)
+ break ;;
+ esac
+done
+
+if test x"\$quiet" = xy -a x"\$verbose" = xy; then
+ echo Cannot be verbose and quiet at the same time. >&2
+ exit 1
+fi
+
+if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then
+ echo "Administrative privileges required for this archive (use su or sudo)" >&2
+ exit 1
+fi
+
+if test x"\$copy" \!= xphase2; then
+ MS_PrintLicense
+fi
+
+case "\$copy" in
+copy)
+ tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$
+ mkdir "\$tmpdir" || {
+ echo "Could not create temporary directory \$tmpdir" >&2
+ exit 1
+ }
+ SCRIPT_COPY="\$tmpdir/makeself"
+ echo "Copying to a temporary location..." >&2
+ cp "\$0" "\$SCRIPT_COPY"
+ chmod +x "\$SCRIPT_COPY"
+ cd "\$TMPROOT"
+ exec "\$SCRIPT_COPY" --phase2 -- \$initargs
+ ;;
+phase2)
+ finish="\$finish ; rm -rf \`dirname \$0\`"
+ ;;
+esac
+
+if test x"\$nox11" = xn; then
+ if tty -s; then # Do we have a terminal?
+ :
+ else
+ if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X?
+ if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable
+ GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology"
+ for a in \$GUESS_XTERMS; do
+ if type \$a >/dev/null 2>&1; then
+ XTERM=\$a
+ break
+ fi
+ done
+ chmod a+x \$0 || echo Please add execution rights on \$0
+ if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal!
+ exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs"
+ else
+ exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs"
+ fi
+ fi
+ fi
+ fi
+fi
+
+[ -d "\$targetdir/etc/netdata.old" ] && echo "Moving existing old directory" && mv "\$targetdir/etc/netdata.old" "\$targetdir/etc/netdata.old.$$"
+[ -d \$targetdir/etc/netdata ] && echo "Backing up existing directory" && cp -r \$targetdir/etc/netdata "\$targetdir/etc/netdata.old"
+
+if test x"\$targetdir" = x.; then
+ tmpdir="."
+else
+ if test x"\$keep" = xy; then
+ if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then
+ echo "Target directory \$targetdir already exists, aborting." >&2
+ exit 1
+ fi
+ if test x"\$quiet" = xn; then
+ echo "Creating directory \$targetdir" >&2
+ fi
+ tmpdir="\$targetdir"
+ dashp="-p"
+ else
+ tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM"
+ dashp=""
+ fi
+ mkdir \$dashp \$tmpdir || {
+ echo 'Cannot create target directory' \$tmpdir >&2
+ echo 'You should try option --target dir' >&2
+ eval \$finish
+ exit 1
+ }
+fi
+
+location="\`pwd\`"
+if test x"\$SETUP_NOCHECK" != x1; then
+ MS_Check "\$0"
+fi
+offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+
+if test x"\$verbose" = xy; then
+ MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] "
+ read yn
+ if test x"\$yn" = xn; then
+ eval \$finish; exit 1
+ fi
+fi
+
+if test x"\$quiet" = xn; then
+ MS_Printf "Uncompressing \$label"
+fi
+res=3
+if test x"\$keep" = xn; then
+ trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15
+fi
+
+if test x"\$nodiskspace" = xn; then
+ leftspace=\`MS_diskspace \$tmpdir\`
+ if test -n "\$leftspace"; then
+ if test "\$leftspace" -lt $USIZE; then
+ echo
+ echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2
+ echo "Use --nodiskspace option to skip this check and proceed anyway" >&2
+ if test x"\$keep" = xn; then
+ echo "Consider setting TMPDIR to a directory with more free space."
+ fi
+ eval \$finish; exit 1
+ fi
+ fi
+fi
+
+for s in \$filesizes
+do
+ if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then
+ if test x"\$ownership" = xy; then
+ (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .)
+ fi
+ else
+ echo >&2
+ echo "Unable to decompress \$0" >&2
+ eval \$finish; exit 1
+ fi
+ offset=\`expr \$offset + \$s\`
+done
+if test x"\$quiet" = xn; then
+ echo
+fi
+
+cd "\$tmpdir"
+res=0
+if test x"\$script" != x; then
+ if test x"\$export_conf" = x"y"; then
+ MS_BUNDLE="\$0"
+ MS_LABEL="\$label"
+ MS_SCRIPT="\$script"
+ MS_SCRIPTARGS="\$scriptargs"
+ MS_ARCHDIRNAME="\$archdirname"
+ MS_KEEP="\$KEEP"
+ MS_NOOVERWRITE="\$NOOVERWRITE"
+ MS_COMPRESS="\$COMPRESS"
+ export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS
+ export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS
+ fi
+
+ if test x"\$verbose" = x"y"; then
+ MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] "
+ read yn
+ if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?;
+ fi
+ else
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?
+ fi
+ if test "\$res" -ne 0; then
+ test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2
+ fi
+fi
+if test x"\$keep" = xn; then
+ cd \$TMPROOT
+ /bin/rm -rf \$tmpdir
+fi
+eval \$finish; exit \$res
+EOF
diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt
new file mode 100644
index 000000000..bf482c465
--- /dev/null
+++ b/packaging/makeself/makeself-help-header.txt
@@ -0,0 +1,44 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system runs with systemd:
+
+ - /lib/systemd/system/netdata.service
+
+ or, for older Centos, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt
new file mode 100644
index 000000000..bf482c465
--- /dev/null
+++ b/packaging/makeself/makeself-license.txt
@@ -0,0 +1,44 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system runs with systemd:
+
+ - /lib/systemd/system/netdata.service
+
+ or, for older Centos, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm
new file mode 100644
index 000000000..6bd4703db
--- /dev/null
+++ b/packaging/makeself/makeself.lsm
@@ -0,0 +1,16 @@
+Begin3
+Title: netdata
+Version: NETDATA_VERSION
+Description: netdata is a system for distributed real-time performance and health monitoring.
+ It provides unparalleled insights, in real-time, of everything happening on the
+ system it runs (including applications such as web and database servers), using
+ modern interactive web dashboards. netdata is fast and efficient, designed to
+ permanently run on all systems (physical & virtual servers, containers, IoT
+ devices), without disrupting their core function.
+Keywords: real-time performance and health monitoring
+Author: Costa Tsaousis (costa@tsaousis.gr)
+Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
+Original-site: https://my-netdata.io/
+Platform: Unix
+Copying-policy: GPL
+End
diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh
new file mode 100755
index 000000000..f3cb69976
--- /dev/null
+++ b/packaging/makeself/makeself.sh
@@ -0,0 +1,621 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Makeself version 2.3.x
+# by Stephane Peter <megastep@megastep.org>
+#
+# Utility to create self-extracting tar.gz archives.
+# The resulting archive is a file holding the tar.gz archive with
+# a small Shell script stub that uncompresses the archive to a temporary
+# directory and then executes a given script from within that directory.
+#
+# Makeself home page: http://makeself.io/
+#
+# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain.
+#
+# Version history :
+# - 1.0 : Initial public release
+# - 1.1 : The archive can be passed parameters that will be passed on to
+# the embedded script, thanks to John C. Quillan
+# - 1.2 : Package distribution, bzip2 compression, more command line options,
+# support for non-temporary archives. Ideas thanks to Francois Petitjean
+# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean:
+# Support for no compression (--nocomp), script is no longer mandatory,
+# automatic launch in an xterm, optional verbose output, and -target
+# archive option to indicate where to extract the files.
+# - 1.4 : Improved UNIX compatibility (Francois Petitjean)
+# Automatic integrity checking, support of LSM files (Francois Petitjean)
+# - 1.5 : Many bugfixes. Optionally disable xterm spawning.
+# - 1.5.1 : More bugfixes, added archive options -list and -check.
+# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big
+# archives (Quake III demo)
+# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm.
+# More verbosity in xterms and check for embedded command's return value.
+# Bugfix for Debian 2.0 systems that have a different "print" command.
+# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed.
+# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to
+# bypass checksum verification of archives.
+# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon)
+# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports.
+# - 2.0.1 : Added --copy
+# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates.
+# Added --nochown for archives
+#              Stopped doing redundant checksums when not necessary
+# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command
+# Cleaned up the code to handle error codes from compress. Simplified the extraction code.
+# - 2.1.2 : Some bug fixes. Use head -n to avoid problems.
+# - 2.1.3 : Bug fixes with command line when spawning terminals.
+# Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive.
+# Added --noexec to prevent execution of embedded scripts.
+# Added --nomd5 and --nocrc to avoid creating checksums in archives.
+# Added command used to create the archive in --info output.
+# Run the embedded script through eval.
+# - 2.1.4 : Fixed --info output.
+# Generate random directory name when extracting files to . to avoid problems. (Jason Trent)
+# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent)
+# Avoid some race conditions (Ludwig Nussel)
+# Unset the $CDPATH variable to avoid problems if it is set. (Debian)
+# Better handling of dot files in the archive directory.
+# - 2.1.5 : Made the md5sum detection consistent with the header code.
+# Check for the presence of the archive directory
+# Added --encrypt for symmetric encryption through gpg (Eric Windisch)
+# Added support for the digest command on Solaris 10 for MD5 checksums
+# Check for available disk space before extracting to the target directory (Andreas Schweitzer)
+# Allow extraction to run asynchronously (patch by Peter Hatch)
+# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo)
+# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spinning cursor (Guy Baconniere)
+# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere)
+# Added --target dir to allow extracting directly to a target directory (Guy Baconniere)
+# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details.
+# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky)
+#
+# (C) 1998-2017 by Stephane Peter <megastep@megastep.org>
+#
+# This software is released under the terms of the GNU GPL version 2 and above
+# Please read the license at http://www.gnu.org/copyleft/gpl.html
+#
+
+MS_VERSION=2.3.1
+MS_COMMAND="$0"
+unset CDPATH
+
+for f in "${1+"$@"}"; do
+ MS_COMMAND="$MS_COMMAND \\\\
+ \\\"$f\\\""
+done
+
+# For Solaris systems
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:$PATH
+ export PATH
+fi
+
+# Procedures
+
+MS_Usage()
+{
+ echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
+ echo "params can be one or more of the following :"
+ echo " --version | -v : Print out Makeself version number and exit"
+ echo " --help | -h : Print out this help message"
+ echo " --tar-quietly : Suppress verbose output from the tar command"
+ echo " --quiet | -q : Do not print any messages other than errors."
+ echo " --gzip : Compress using gzip (default if detected)"
+ echo " --pigz : Compress with pigz"
+ echo " --bzip2 : Compress using bzip2 instead of gzip"
+ echo " --pbzip2 : Compress using pbzip2 instead of gzip"
+ echo " --xz : Compress using xz instead of gzip"
+ echo " --lzo : Compress using lzop instead of gzip"
+ echo " --lz4 : Compress using lz4 instead of gzip"
+ echo " --compress : Compress using the UNIX 'compress' command"
+ echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)"
+ echo " --base64 : Instead of compressing, encode the data using base64"
+ echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG"
+ echo " --gpg-asymmetric-encrypt-sign"
+ echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG"
+ echo " --gpg-extra opt : Append more options to the gpg command line"
+ echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL"
+ echo " --nocomp : Do not compress the data"
+ echo " --notemp : The archive will create archive_dir in the"
+ echo " current directory and uncompress in ./archive_dir"
+ echo " --needroot : Check that the root user is extracting the archive before proceeding"
+ echo " --copy : Upon extraction, the archive will first copy itself to"
+ echo " a temporary directory"
+ echo " --append : Append more files to an existing Makeself archive"
+ echo " The label and startup scripts will then be ignored"
+ echo " --target dir : Extract directly to a target directory"
+ echo " directory path can be either absolute or relative"
+ echo " --nooverwrite : Do not extract the archive if the specified target directory exists"
+ echo " --current : Files will be extracted to the current directory"
+ echo " Both --current and --target imply --notemp"
+ echo " --tar-extra opt : Append more options to the tar command line"
+    echo "    --untar-extra opt : Append more options to tar during the extraction of the archive"
+ echo " --nomd5 : Don't calculate an MD5 for archive"
+ echo " --nocrc : Don't calculate a CRC for archive"
+ echo " --header file : Specify location of the header script"
+ echo " --follow : Follow the symlinks in the archive"
+ echo " --noprogress : Do not show the progress during the decompression"
+ echo " --nox11 : Disable automatic spawn of a xterm"
+ echo " --nowait : Do not wait for user input after executing embedded"
+ echo " program from an xterm"
+ echo " --lsm file : LSM file describing the package"
+ echo " --license file : Append a license file"
+ echo " --help-header file : Add a header to the archive's --help output"
+ echo " --packaging-date date"
+ echo " : Use provided string as the packaging date"
+ echo " instead of the current date."
+ echo
+ echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive."
+ echo " --export-conf : Export configuration variables to startup_script"
+ echo
+ echo "Do not forget to give a fully qualified startup script name"
+ echo "(i.e. with a ./ prefix if inside the archive)."
+ exit 1
+}
+
+# Default settings
+if type gzip 2>&1 > /dev/null; then
+ COMPRESS=gzip
+else
+ COMPRESS=Unix
+fi
+COMPRESS_LEVEL=9
+KEEP=n
+CURRENT=n
+NOX11=n
+NOWAIT=n
+APPEND=n
+TAR_QUIETLY=n
+KEEP_UMASK=n
+QUIET=n
+NOPROGRESS=n
+COPY=none
+NEED_ROOT=n
+TAR_ARGS=cvf
+TAR_EXTRA=""
+GPG_EXTRA=""
+DU_ARGS=-ks
+HEADER=`dirname "$0"`/makeself-header.sh
+TARGETDIR=""
+NOOVERWRITE=n
+DATE=`LC_ALL=C date`
+EXPORT_CONF=n
+
+# LSM file stuff
+LSM_CMD="echo No LSM. >> \"\$archname\""
+
+while true
+do
+ case "$1" in
+ --version | -v)
+ echo Makeself version $MS_VERSION
+ exit 0
+ ;;
+ --pbzip2)
+ COMPRESS=pbzip2
+ shift
+ ;;
+ --bzip2)
+ COMPRESS=bzip2
+ shift
+ ;;
+ --gzip)
+ COMPRESS=gzip
+ shift
+ ;;
+ --pigz)
+ COMPRESS=pigz
+ shift
+ ;;
+ --xz)
+ COMPRESS=xz
+ shift
+ ;;
+ --lzo)
+ COMPRESS=lzo
+ shift
+ ;;
+ --lz4)
+ COMPRESS=lz4
+ shift
+ ;;
+ --compress)
+ COMPRESS=Unix
+ shift
+ ;;
+ --base64)
+ COMPRESS=base64
+ shift
+ ;;
+ --gpg-encrypt)
+ COMPRESS=gpg
+ shift
+ ;;
+ --gpg-asymmetric-encrypt-sign)
+ COMPRESS=gpg-asymmetric
+ shift
+ ;;
+ --gpg-extra)
+ GPG_EXTRA="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --ssl-encrypt)
+ COMPRESS=openssl
+ shift
+ ;;
+ --nocomp)
+ COMPRESS=none
+ shift
+ ;;
+ --complevel)
+ COMPRESS_LEVEL="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --notemp)
+ KEEP=y
+ shift
+ ;;
+ --copy)
+ COPY=copy
+ shift
+ ;;
+ --current)
+ CURRENT=y
+ KEEP=y
+ shift
+ ;;
+ --tar-extra)
+ TAR_EXTRA="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --untar-extra)
+ UNTAR_EXTRA="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --target)
+ TARGETDIR="$2"
+ KEEP=y
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --nooverwrite)
+ NOOVERWRITE=y
+ shift
+ ;;
+ --needroot)
+ NEED_ROOT=y
+ shift
+ ;;
+ --header)
+ HEADER="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --license)
+ LICENSE=`cat $2`
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --follow)
+ TAR_ARGS=cvhf
+ DU_ARGS=-ksL
+ shift
+ ;;
+ --noprogress)
+ NOPROGRESS=y
+ shift
+ ;;
+ --nox11)
+ NOX11=y
+ shift
+ ;;
+ --nowait)
+ NOWAIT=y
+ shift
+ ;;
+ --nomd5)
+ NOMD5=y
+ shift
+ ;;
+ --nocrc)
+ NOCRC=y
+ shift
+ ;;
+ --append)
+ APPEND=y
+ shift
+ ;;
+ --lsm)
+ LSM_CMD="cat \"$2\" >> \"\$archname\""
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --packaging-date)
+ DATE="$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --help-header)
+ HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2`
+ if ! shift 2; then MS_Help; exit 1; fi
+ [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER
+"
+ ;;
+ --tar-quietly)
+ TAR_QUIETLY=y
+ shift
+ ;;
+ --keep-umask)
+ KEEP_UMASK=y
+ shift
+ ;;
+ --export-conf)
+ EXPORT_CONF=y
+ shift
+ ;;
+ -q | --quiet)
+ QUIET=y
+ shift
+ ;;
+ -h | --help)
+ MS_Usage
+ ;;
+ -*)
+ echo Unrecognized flag : "$1"
+ MS_Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+if test $# -lt 1; then
+ MS_Usage
+else
+ if test -d "$1"; then
+ archdir="$1"
+ else
+ echo "Directory $1 does not exist." >&2
+ exit 1
+ fi
+fi
+archname="$2"
+
+if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then
+ if test "$TAR_ARGS" = "cvf"; then
+ TAR_ARGS="cf"
+ elif test "$TAR_ARGS" = "cvhf";then
+ TAR_ARGS="chf"
+ fi
+fi
+
+if test "$APPEND" = y; then
+ if test $# -lt 2; then
+ MS_Usage
+ fi
+
+ # Gather the info from the original archive
+ OLDENV=`sh "$archname" --dumpconf`
+ if test $? -ne 0; then
+ echo "Unable to update archive: $archname" >&2
+ exit 1
+ else
+ eval "$OLDENV"
+ fi
+else
+ if test "$KEEP" = n -a $# = 3; then
+ echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2
+ echo >&2
+ MS_Usage
+ fi
+ # We don't want to create an absolute directory unless a target directory is defined
+ if test "$CURRENT" = y; then
+ archdirname="."
+ elif test x$TARGETDIR != x; then
+ archdirname="$TARGETDIR"
+ else
+ archdirname=`basename "$1"`
+ fi
+
+ if test $# -lt 3; then
+ MS_Usage
+ fi
+
+ LABEL="$3"
+ SCRIPT="$4"
+ test "x$SCRIPT" = x || shift 1
+ shift 3
+ SCRIPTARGS="$*"
+fi
+
+if test "$KEEP" = n -a "$CURRENT" = y; then
+ echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2
+ exit 1
+fi
+
+case $COMPRESS in
+gzip)
+ GZIP_CMD="gzip -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pigz)
+ GZIP_CMD="pigz -$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pbzip2)
+ GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+bzip2)
+ GZIP_CMD="bzip2 -$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+xz)
+ GZIP_CMD="xz -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="xz -d"
+ ;;
+lzo)
+ GZIP_CMD="lzop -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lzop -d"
+ ;;
+lz4)
+ GZIP_CMD="lz4 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lz4 -d"
+ ;;
+base64)
+ GZIP_CMD="base64"
+ GUNZIP_CMD="base64 -d -i"
+ ;;
+gpg)
+ GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL"
+ GUNZIP_CMD="gpg -d"
+ ;;
+gpg-asymmetric)
+ GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es"
+ GUNZIP_CMD="gpg --yes -d"
+ ;;
+openssl)
+ GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256"
+ GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256"
+ ;;
+Unix)
+ GZIP_CMD="compress -cf"
+ GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd"
+ ;;
+none)
+ GZIP_CMD="cat"
+ GUNZIP_CMD="cat"
+ ;;
+esac
+
+tmpfile="${TMPDIR:=/tmp}/mkself$$"
+
+if test -f "$HEADER"; then
+ oldarchname="$archname"
+ archname="$tmpfile"
+ # Generate a fake header to count its lines
+ SKIP=0
+ . "$HEADER"
+ SKIP=`cat "$tmpfile" |wc -l`
+ # Get rid of any spaces
+ SKIP=`expr $SKIP`
+ rm -f "$tmpfile"
+ if test "$QUIET" = "n";then
+ echo Header is $SKIP lines long >&2
+ fi
+
+ archname="$oldarchname"
+else
+ echo "Unable to open header file: $HEADER" >&2
+ exit 1
+fi
+
+if test "$QUIET" = "n";then
+ echo
+fi
+
+if test "$APPEND" = n; then
+ if test -f "$archname"; then
+ echo "WARNING: Overwriting existing file: $archname" >&2
+ fi
+fi
+
+USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'`
+
+if test "." = "$archdirname"; then
+ if test "$KEEP" = n; then
+ archdirname="makeself-$$-`date +%Y%m%d%H%M%S`"
+ fi
+fi
+
+test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; }
+if test "$QUIET" = "n";then
+ echo About to compress $USIZE KB of data...
+ echo Adding files to archive named \"$archname\"...
+fi
+exec 3<> "$tmpfile"
+( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . | eval "$GZIP_CMD" >&3 ) ) || \
+ { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; }
+exec 3>&- # try to close the archive
+
+fsize=`cat "$tmpfile" | wc -c | tr -d " "`
+
+# Compute the checksums
+
+md5sum=00000000000000000000000000000000
+crcsum=0000000000
+
+if test "$NOCRC" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping crc at user request"
+ fi
+else
+ crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1`
+ if test "$QUIET" = "n";then
+ echo "CRC: $crcsum"
+ fi
+fi
+
+if test "$NOMD5" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping md5sum at user request"
+ fi
+else
+ # Try to locate a MD5 binary
+ OLD_PATH=$PATH
+ PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest`
+ PATH=$OLD_PATH
+ if test -x "$MD5_PATH"; then
+ if test `basename ${MD5_PATH}`x = digestx; then
+ MD5_ARG="-a md5"
+ fi
+ md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`;
+ if test "$QUIET" = "n";then
+ echo "MD5: $md5sum"
+ fi
+ else
+ if test "$QUIET" = "n";then
+ echo "MD5: none, MD5 command not found"
+ fi
+ fi
+fi
+
+if test "$APPEND" = y; then
+ mv "$archname" "$archname".bak || exit
+
+ # Prepare entry for new archive
+ filesizes="$filesizes $fsize"
+ CRCsum="$CRCsum $crcsum"
+ MD5sum="$MD5sum $md5sum"
+ USIZE=`expr $USIZE + $OLDUSIZE`
+ # Generate the header
+ . "$HEADER"
+ # Append the original data
+ tail -n +$OLDSKIP "$archname".bak >> "$archname"
+ # Append the new data
+ cat "$tmpfile" >> "$archname"
+
+ chmod +x "$archname"
+ rm -f "$archname".bak
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully updated.
+ fi
+else
+ filesizes="$fsize"
+ CRCsum="$crcsum"
+ MD5sum="$md5sum"
+
+ # Generate the header
+ . "$HEADER"
+
+ # Append the compressed tar data after the stub
+ if test "$QUIET" = "n";then
+ echo
+ fi
+ cat "$tmpfile" >> "$archname"
+ chmod +x "$archname"
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully created.
+ fi
+fi
+rm -f "$tmpfile"
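The tail of makeself.sh above implements a simple layout: generate a header stub and count its lines (SKIP), stream the archive directory through tar and the selected compressor into a temporary file, append that compressed data right after the stub, and mark the result executable. A minimal sketch of the same mechanism in plain shell, assuming gzip compression and hypothetical names (payload-dir, stub.sh, archive.run):

    # Build: concatenate a header stub and a compressed tarball into one executable.
    printf '#!/bin/sh\n# header stub ends here, binary payload follows\n' > stub.sh
    SKIP=$(wc -l < stub.sh)                         # number of header lines to skip later
    tar -cf - payload-dir | gzip -9 > payload.tgz   # payload-dir: any directory to package
    cat stub.sh payload.tgz > archive.run
    chmod +x archive.run

    # Extract: skip the header lines, then decompress and untar the payload.
    tail -n +$((SKIP + 1)) archive.run | gzip -cd | tar -xf -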
diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh
new file mode 100755
index 000000000..38cc41ef7
--- /dev/null
+++ b/packaging/makeself/post-installer.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This script is started by the shell of the system
+# and executes our 'install-or-update.sh' script
+# using the netdata-supplied, statically linked BASH,
+#
+# so that 'install-or-update.sh' is always
+# guaranteed to run under BASH v4.
+
+./bin/bash system/install-or-update.sh "${@}"
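The comments above describe the bootstrap pattern: the system shell only has to be POSIX enough to hand control to the statically linked bash shipped inside the package, so the real installer always runs under a known BASH v4. A slightly more defensive sketch of the same hand-off (the existence check and error message below are illustrative, not part of the shipped script):

    #!/bin/sh
    # Sketch: fail early if the bundled static bash is missing, then exec it
    # so install-or-update.sh runs under the bundled BASH.
    [ -x ./bin/bash ] || { echo "./bin/bash not found or not executable" >&2; exit 1; }
    exec ./bin/bash system/install-or-update.sh "${@}"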
diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh
new file mode 100755
index 000000000..f7507c2d2
--- /dev/null
+++ b/packaging/makeself/run-all-jobs.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+LC_ALL=C
+umask 002
+
+# be nice
+renice 19 $$ >/dev/null 2>/dev/null
+
+# -----------------------------------------------------------------------------
+# prepare the environment for the jobs
+
+# installation directory
+export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+
+# our source directory
+export NETDATA_MAKESELF_PATH="$(dirname "${0}")"
+if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ]
+ then
+ export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+fi
+
+# netdata source directory
+export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+
+# make sure ${NULL} is empty
+export NULL=
+
+# -----------------------------------------------------------------------------
+
+cd "${NETDATA_MAKESELF_PATH}" || exit 1
+
+. ./functions.sh "${@}" || exit 1
+
+for x in jobs/*.install.sh
+do
+ progress "running ${x}"
+ "${x}" "${NETDATA_INSTALL_PATH}" || exit 1
+done
+
+echo >&2 "All jobs for static packaging done successfully."
+exit 0
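run-all-jobs.sh executes every jobs/*.install.sh in lexical order, passing the installation prefix as the only argument, so each build step is an independent, numbered shell script. A hypothetical job matching that contract (the file name and body below are illustrative only) could look like this:

    #!/usr/bin/env bash
    # jobs/55-example.install.sh (hypothetical): receives the install prefix as $1
    # and must exit non-zero on failure so the caller can stop the build.
    set -e
    NETDATA_INSTALL_PATH="${1}"
    mkdir -p "${NETDATA_INSTALL_PATH}/bin"
    echo "example job done for ${NETDATA_INSTALL_PATH}"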
diff --git a/packaging/manual_nightly_deployment.sh b/packaging/manual_nightly_deployment.sh
new file mode 100755
index 000000000..a0999bb18
--- /dev/null
+++ b/packaging/manual_nightly_deployment.sh
@@ -0,0 +1,127 @@
+#!/usr/bin/env bash
+#
+# This tool allows the netdata team to manually deploy nightlies.
+# It emulates the nightly operations required to publish a new version to our users.
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+set -e
+
+# If we are not at the top level directory of the netdata git repo, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as packaging/$(basename "$0") from the top level directory of the netdata git repository"
+ echo "Manual nightly deployment aborted"
+ exit 1
+fi
+
+if [ $# -lt 1 ] || [ $# -gt 2 ]; then
+ echo "Run as packaging/$(basename "$0") [docker|gcs|all] from the top level directory of the netdata git repository"
+ exit 1
+fi
+
+GSUTIL_BINARY=$(command -v gsutil 2> /dev/null)
+if [ -z "${GSUTIL_BINARY}" ]; then
+ echo "No gsutil utility available; gsutil is required to manually deploy artifacts to GCS"
+ exit 1
+fi
+
+# Function declarations
+publish_docker() {
+
+ # Ensure REPOSITORY present
+ if [ -z "${REPOSITORY}" ]; then
+ echo "Please provide the repository to deploy the containers to:"
+ read -r REPOSITORY
+ export REPOSITORY
+ else
+ echo "Docker publishing to ${REPOSITORY}"
+ fi
+
+ # Ensure DOCKER_USERNAME present
+ if [ -z "${DOCKER_USERNAME}" ]; then
+ echo "For repository ${REPOSITORY}, please provide the Docker username to use:"
+ read -r DOCKER_USERNAME
+ export DOCKER_USERNAME
+ else
+ echo "Using docker username ${DOCKER_USERNAME}"
+ fi
+
+ # Ensure DOCKER_PASS present
+ if [ -z "${DOCKER_PASS}" ]; then
+ echo "Username ${DOCKER_USERNAME} received, now please provide the Docker password:"
+ read -r -s DOCKER_PASS
+ export DOCKER_PASS
+ else
+ echo "Docker password already set in the environment, using that"
+ fi
+
+ echo "Building Docker images..."
+ packaging/docker/build.sh
+
+ echo "Publishing Docker images..."
+ packaging/docker/publish.sh
+}
+
+publish_nightly_binaries() {
+ echo "Publishing nightly binaries to GCS"
+
+ echo "Please select the bucket to sync to, from the ones available to you:"
+ bucket_list=$(${GSUTIL_BINARY} list | tr '\n' ' ')
+ declare -A buckets
+ idx=0
+ for abucket in ${bucket_list}; do
+ echo "${idx}. ${abucket}"
+ buckets["${idx}"]=${abucket}
+ ((idx=idx+1))
+ done
+ read -p "Selection> " -r -n 1 selected_bucket
+
+ echo "Ok!"
+ echo "Syncing artifacts directory contents with GCS bucket: ${buckets[${selected_bucket}]}"
+ if [ -d artifacts ]; then
+ ${GSUTIL_BINARY} -m rsync -r artifacts "${buckets["${selected_bucket}"]}"
+ echo "GCS Sync complete!"
+ else
+ echo "Directory 'artifacts' does not exist, nothing to sync to GCS"
+ fi
+}
+
+prepare_and_publish_gcs() {
+ # Prepare the artifacts directory
+ echo "Preparing artifacts directory contents"
+ .travis/create_artifacts.sh
+
+ # Publish it to GCS
+ publish_nightly_binaries
+
+ # Clean up
+ echo "Cleaning up repository"
+ make clean || echo "Nothing to clean"
+ make distclean || echo "Nothing to distclean"
+ rm -rf artifacts
+}
+
+# Mandatory variable declarations
+export TRAVIS_REPO_SLUG="netdata/netdata"
+
+echo "Manual nightly deployment procedure started"
+case "$1" in
+ "docker")
+ publish_docker
+ ;;
+ "gcs")
+ prepare_and_publish_gcs
+ ;;
+ "all")
+ publish_docker
+ prepare_and_publish_gcs
+ ;;
+ *)
+ echo "ERROR: Invalid request parameter $1. Valid values are: docker, gcs, all"; exit 1
+ ;;
+esac
+echo "Manual nightly deployment completed!"
diff --git a/packaging/version b/packaging/version
index 07c3efad8..7893098b6 100644
--- a/packaging/version
+++ b/packaging/version
@@ -1 +1 @@
-v1.17.0
+v1.17.1