author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-19 02:57:58 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-19 02:57:58 +0000
commit	be1c7e50e1e8809ea56f2c9d472eccd8ffd73a97 (patch)
tree	9754ff1ca740f6346cf8483ec915d4054bc5da2d /packaging
parent	Initial commit. (diff)
download	netdata-be1c7e50e1e8809ea56f2c9d472eccd8ffd73a97.tar.xz
	netdata-be1c7e50e1e8809ea56f2c9d472eccd8ffd73a97.zip
Adding upstream version 1.44.3. (upstream/1.44.3, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'packaging')
-rw-r--r--  packaging/PLATFORM_SUPPORT.md | 189
-rw-r--r--  packaging/VERSIONING_AND_PUBLIC_API.md | 145
-rwxr-xr-x  packaging/build_package_install_test.sh | 37
-rw-r--r--  packaging/building-native-packages-locally.md | 101
-rwxr-xr-x  packaging/bundle-ebpf-co-re.sh | 9
-rwxr-xr-x  packaging/bundle-ebpf.sh | 20
-rwxr-xr-x  packaging/bundle-libbpf.sh | 27
-rwxr-xr-x  packaging/bundle-protobuf.sh | 16
-rwxr-xr-x  packaging/check-kernel-config.sh | 76
-rw-r--r--  packaging/current_libbpf.checksums | 1
-rw-r--r--  packaging/current_libbpf.version | 1
-rw-r--r--  packaging/docker/Dockerfile | 136
-rw-r--r--  packaging/docker/README.md | 529
-rwxr-xr-x  packaging/docker/gen-cflags.sh | 9
-rwxr-xr-x  packaging/docker/health.sh | 18
-rwxr-xr-x  packaging/docker/run.sh | 120
-rw-r--r--  packaging/ebpf-co-re.checksums | 1
-rw-r--r--  packaging/ebpf-co-re.version | 1
-rw-r--r--  packaging/ebpf.checksums | 3
-rw-r--r--  packaging/ebpf.version | 1
-rw-r--r--  packaging/go.d.checksums | 20
-rw-r--r--  packaging/go.d.version | 1
-rw-r--r--  packaging/installer/README.md | 275
-rw-r--r--  packaging/installer/REINSTALL.md | 66
-rw-r--r--  packaging/installer/UNINSTALL.md | 85
-rw-r--r--  packaging/installer/UPDATE.md | 209
-rwxr-xr-x  packaging/installer/dependencies/alpine.sh | 114
-rwxr-xr-x  packaging/installer/dependencies/arch.sh | 102
-rwxr-xr-x  packaging/installer/dependencies/centos.sh | 202
-rwxr-xr-x  packaging/installer/dependencies/clearlinux.sh | 89
-rwxr-xr-x  packaging/installer/dependencies/debian.sh | 108
-rwxr-xr-x  packaging/installer/dependencies/fedora.sh | 122
-rwxr-xr-x  packaging/installer/dependencies/freebsd.sh | 145
-rwxr-xr-x  packaging/installer/dependencies/gentoo.sh | 100
-rwxr-xr-x  packaging/installer/dependencies/macos.sh | 0
-rwxr-xr-x  packaging/installer/dependencies/ol.sh | 160
-rwxr-xr-x  packaging/installer/dependencies/opensuse.sh | 106
-rwxr-xr-x  packaging/installer/dependencies/rhel.sh | 0
-rwxr-xr-x  packaging/installer/dependencies/rockylinux.sh | 167
-rwxr-xr-x  packaging/installer/dependencies/sabayon.sh | 0
-rwxr-xr-x  packaging/installer/dependencies/ubuntu.sh | 105
-rw-r--r--  packaging/installer/functions.sh | 1080
-rwxr-xr-x  packaging/installer/install-required-packages.sh | 2112
l---------  packaging/installer/kickstart-ng.sh | 1
l---------  packaging/installer/kickstart-static64.sh | 1
-rwxr-xr-x  packaging/installer/kickstart.sh | 2332
-rw-r--r--  packaging/installer/methods/ansible.md | 156
-rw-r--r--  packaging/installer/methods/aws.md | 67
-rw-r--r--  packaging/installer/methods/azure.md | 68
-rw-r--r--  packaging/installer/methods/freebsd.md | 148
-rw-r--r--  packaging/installer/methods/gcp.md | 70
-rw-r--r--  packaging/installer/methods/kickstart.md | 237
-rw-r--r--  packaging/installer/methods/kubernetes.md | 200
-rw-r--r--  packaging/installer/methods/macos.md | 118
-rw-r--r--  packaging/installer/methods/manual.md | 248
-rw-r--r--  packaging/installer/methods/methods.md | 26
-rw-r--r--  packaging/installer/methods/offline.md | 59
-rw-r--r--  packaging/installer/methods/packages.md | 151
-rw-r--r--  packaging/installer/methods/pfsense.md | 87
-rw-r--r--  packaging/installer/methods/source.md | 244
-rw-r--r--  packaging/installer/methods/synology.md | 64
-rw-r--r--  packaging/installer/methods/systems.md | 18
-rwxr-xr-x  packaging/installer/netdata-uninstaller.sh | 778
-rwxr-xr-x  packaging/installer/netdata-updater.sh | 990
-rw-r--r--  packaging/jsonc.checksums | 1
-rw-r--r--  packaging/jsonc.version | 1
-rw-r--r--  packaging/libbpf_0_0_9.checksums | 1
-rw-r--r--  packaging/libbpf_0_0_9.version | 1
-rw-r--r--  packaging/maintainers/README.md | 80
-rw-r--r--  packaging/makeself/README.md | 75
-rwxr-xr-x  packaging/makeself/build-static.sh | 58
-rwxr-xr-x  packaging/makeself/build-x86_64-static.sh | 7
-rwxr-xr-x  packaging/makeself/build.sh | 65
-rw-r--r--  packaging/makeself/bundled-packages | 16
-rwxr-xr-x  packaging/makeself/functions.sh | 105
-rwxr-xr-x  packaging/makeself/install-alpine-packages.sh | 49
-rwxr-xr-x  packaging/makeself/install-or-update.sh | 248
-rwxr-xr-x  packaging/makeself/jobs/10-prepare-destination.install.sh | 23
-rwxr-xr-x  packaging/makeself/jobs/20-openssl.install.sh | 56
-rwxr-xr-x  packaging/makeself/jobs/50-bash-5.1.16.install.sh | 49
-rwxr-xr-x  packaging/makeself/jobs/50-curl.install.sh | 78
-rwxr-xr-x  packaging/makeself/jobs/50-ioping-1.3.install.sh | 32
-rwxr-xr-x  packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh | 39
-rwxr-xr-x  packaging/makeself/jobs/70-netdata-git.install.sh | 61
-rwxr-xr-x  packaging/makeself/jobs/90-netdata-runtime-check.sh | 54
-rwxr-xr-x  packaging/makeself/jobs/99-makeself.install.sh | 119
-rwxr-xr-x  packaging/makeself/makeself-header.sh | 717
-rw-r--r--  packaging/makeself/makeself-help-header.txt | 49
-rw-r--r--  packaging/makeself/makeself-license.txt | 41
-rw-r--r--  packaging/makeself/makeself.lsm | 16
-rwxr-xr-x  packaging/makeself/makeself.sh | 780
-rwxr-xr-x  packaging/makeself/post-installer.sh | 11
-rwxr-xr-x  packaging/makeself/run-all-jobs.sh | 42
-rwxr-xr-x  packaging/makeself/uname2platform.sh | 18
-rw-r--r--  packaging/protobuf.checksums | 1
-rw-r--r--  packaging/protobuf.version | 1
-rw-r--r--  packaging/repoconfig/Makefile | 35
-rwxr-xr-x  packaging/repoconfig/build-deb.sh | 49
-rwxr-xr-x  packaging/repoconfig/build-rpm.sh | 26
-rw-r--r--  packaging/repoconfig/debian/changelog | 25
-rw-r--r--  packaging/repoconfig/debian/compat | 1
-rw-r--r--  packaging/repoconfig/debian/control | 19
-rw-r--r--  packaging/repoconfig/debian/copyright | 10
-rwxr-xr-x  packaging/repoconfig/debian/rules | 21
-rw-r--r--  packaging/repoconfig/debian/source/format | 1
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.al | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.centos | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.fedora | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.ol | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.suse | 19
-rw-r--r--  packaging/repoconfig/netdata-repo.spec | 118
-rw-r--r--  packaging/repoconfig/netdata.list.in | 2
-rw-r--r--  packaging/repoconfig/netdata.repo.al | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.centos | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.fedora | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.ol | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.suse | 19
-rw-r--r--  packaging/version | 1
-rw-r--r--  packaging/yaml.checksums | 1
-rw-r--r--  packaging/yaml.version | 1
120 files changed, 16281 insertions, 0 deletions
diff --git a/packaging/PLATFORM_SUPPORT.md b/packaging/PLATFORM_SUPPORT.md
new file mode 100644
index 00000000..5448e5da
--- /dev/null
+++ b/packaging/PLATFORM_SUPPORT.md
@@ -0,0 +1,189 @@
+<!--
+title: "Platform support policy"
+sidebar_label: "Platform support policy"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/PLATFORM_SUPPORT.md"
+learn_status: "Published"
+sidebar_position: "1"
+learn_topic_type: "Tasks"
+learn_rel_path: "Installation"
+learn_docs_purpose: "Present all the supported platforms in the Netdata solution"
+-->
+
+# Platform support policy
+
+Netdata defines three tiers of official support:
+
+- [Core](#core)
+- [Intermediate](#intermediate)
+- [Community](#community)
+
+Each tier carries different guarantees for the platforms in it, as described in the corresponding section below.
+
+Additionally, we define two categories for special cases that we do not support:
+
+- [Third-party supported platforms](#third-party-supported-platforms)
+- [Previously supported platforms](#previously-supported-platforms)
+
+These two categories are explained further below.
+
+Any platforms not listed in any of these categories may or may not work.
+
+The following table shows a general outline of the various support tiers and categories.
+
+| | Bug Support | Guaranteed Configurations | CI Coverage | Native Packages | Static Build Support |
+| - | ----------- | ------------------------- | ----------- | --------------- | -------------------- |
+| Core | High priority | Everything but rare edge cases | Full | Yes, if we can provide them | Full |
+| Intermediate | Normal priority | Common cases | Partial (CI mostly equivalent to **Core**, but possibly with some gaps, and not required to pass) | Possibly | Full |
+| Community | Best Effort | Default only | None | No | Best Effort |
+| Third-party Supported | Users directed to platform maintainers | None | None | No | Best Effort |
+| Previously Supported | Users asked to upgrade | None | None | Yes, but only already published versions | Best Effort |
+
+- ‘Bug Support’: How we handle platform-specific bugs.
+- ‘Guaranteed Configurations’: Which runtime configurations for the agent we try to guarantee will work with minimal
+ effort from users.
+- ‘CI Coverage’: What level of coverage we provide for the platform in CI.
+- ‘Native Packages’: Whether we provide native packages for the system package manager for the platform.
+- ‘Static Build Support’: How well our static builds are expected to work on the platform.
+
+## Currently supported platforms
+
+### Core
+
+Platforms in the core support tier are our top priority. They are covered rigorously in our CI, usually
+include official binary packages, and any platform-specific bugs receive a high priority. From the perspective
+of our developers, platforms in the core support tier _must_ work, with almost no exceptions.
+Our [static builds](#static-builds) are expected to work on these platforms if available. Source-based installs are
+expected to work on these platforms with minimal user effort.
+
+| Platform | Version | Official Native Packages | Notes |
+|--------------------------|----------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| Alpine Linux | 3.18 | No | The latest release of Alpine Linux is guaranteed to remain at **Core** tier due to usage for our Docker images |
+| Alma Linux | 9.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives |
+| Alma Linux | 8.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives |
+| Amazon Linux | 2023 | x86\_64, AArch64 | |
+| Amazon Linux | 2 | x86\_64, AArch64 | |
+| CentOS | 7.x | x86\_64 | |
+| Docker | 19.03 or newer | x86\_64, i386, ARMv7, AArch64, POWER8+ | See our [Docker documentation](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md) for more info on using Netdata on Docker |
+| Debian | 12.x | x86\_64, i386, ARMv7, AArch64 | |
+| Debian | 11.x | x86\_64, i386, ARMv7, AArch64 | |
+| Debian | 10.x | x86\_64, i386, ARMv7, AArch64 | |
+| Fedora | 39 | x86\_64, AArch64 | |
+| Fedora | 38 | x86\_64, AArch64 | |
+| Fedora | 37 | x86\_64, AArch64 | |
+| openSUSE | Leap 15.4 | x86\_64, AArch64 | |
+| openSUSE | Leap 15.5 | x86\_64, AArch64 | |
+| Oracle Linux | 9.x | x86\_64, AArch64 | |
+| Oracle Linux | 8.x | x86\_64, AArch64 | |
+| Red Hat Enterprise Linux | 9.x | x86\_64, AArch64 | |
+| Red Hat Enterprise Linux | 8.x | x86\_64, AArch64 | |
+| Red Hat Enterprise Linux | 7.x | x86\_64 | |
+| Ubuntu | 23.10 | x86\_64, AArch64, ARMv7 | |
+| Ubuntu | 23.04 | x86\_64, AArch64, ARMv7 | |
+| Ubuntu | 22.04 | x86\_64, ARMv7, AArch64 | |
+| Ubuntu | 20.04 | x86\_64, ARMv7, AArch64 | |
+
+
+### Intermediate
+
+Platforms in the intermediate support tier are those which Netdata wants to support, but cannot justify core level
+support for. They are also covered in CI, but not as rigorously as the core tier. They may or may not include
+official binary packages, and any platform-specific bugs receive a normal priority. Generally, we will add new
+platforms that we officially support ourselves to the intermediate tier. Our [static builds](#static-builds) are
+expected to work on these platforms if available. Source-based installs are expected to work on these platforms
+with minimal user effort.
+
+| Platform | Version | Official Native Packages | Notes |
+|---------------|------------|--------------------------|------------------------------------------------------------------------------------------------------|
+| Alpine Linux | Edge | No | |
+| Alpine Linux | 3.17 | No | |
+| Alpine Linux | 3.16 | No | |
+| Arch Linux | Latest | No | We officially recommend the community packages available for Arch Linux |
+| Manjaro Linux | Latest | No | We officially recommend the community packages available for Arch Linux |
+| openSUSE | Tumbleweed | x86\_64, AArch64 | Scheduled for promotion to Core tier at some point after the release of v1.41.0 of the Netdata Agent |
+
+### Community
+
+Platforms in the community support tier are those which are primarily supported by community contributors. They may
+receive some support from Netdata, but are only a best-effort affair. When a community member makes a contribution
+to add support for a new platform, that platform generally will start in this tier. Our [static builds](#static-builds)
+are expected to work on these platforms if available. Source-based installs are usually expected to work on these
+platforms, but may require some extra effort from users.
+
+| Platform | Version | Official Native Packages | Notes |
+|--------------|------------|--------------------------|-----------------------------------------------------------------------------------------------------------|
+| Clear Linux | Latest | No | |
+| Debian | Sid | No | |
+| Fedora | Rawhide | No | |
+| FreeBSD | 13-STABLE | No | Netdata is included in the FreeBSD Ports Tree, and this is the recommended installation method on FreeBSD |
+| Gentoo | Latest | No | |
+| macOS | 13 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies |
+| macOS | 12 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies |
+| macOS | 11 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies. |
+
+## Third-party supported platforms
+
+Some platform maintainers actively support Netdata on their platforms even though we do not provide official
+support. Third-party supported platforms may work, but the experience of using Netdata on such platforms is not
+something we can guarantee. When you use an externally supported platform and report a bug, we will either ask
+you to reproduce the issue on a supported platform or submit a support request directly to the platform maintainers.
+
+Currently, we know of the following platforms having some degree of third-party support for Netdata:
+
+- NixOS: Netdata’s official installation methods do not support NixOS, but the NixOS maintainers provide their
+ own Netdata packages for their platform.
+- Rockstor: Rockstor provides support for a Netdata add-on for their NAS platform. The Rockstor community and
+ developers are the primary source for support on their platform.
+
+## Previously supported platforms
+
+As platforms become end of life upstream, Netdata will stop officially supporting them. We will not actively break
+things on these platforms, but we will also not make any effort to ensure that things keep working on them either.
+If you report a bug on a previously supported platform, we will ask you to reproduce the issue on a currently
+supported platform. If the issue is not reproducible, it will be closed.
+
+We consider a platform to be end of life when the upstream maintainers of that platform stop providing official
+support for it themselves, or when that platform transitions into an ‘extended security maintenance’ period.
+Platforms that meet these criteria will be transitioned to the **Previously Supported** category immediately, with
+no prior warning from Netdata and no deprecation notice (unlike platforms dropped for technical reasons), because
+our end of support should already coincide with the end of the normal support lifecycle for that platform.
+
+On occasion, we may also drop support for a platform due to technical limitations. In such cases, this will be
+announced in the release notes of the next stable release with a deprecation notice. The platform will be supported
+for _that release_, and will be removed from nightlies some time before the next release after that one.
+
+This is a list of platforms that we have supported in the recent past but no longer officially support:
+
+| Platform | Version | Notes |
+|--------------|-----------|----------------------|
+| Alpine Linux | 3.14 | EOL as of 2023-05-01 |
+| Alpine Linux | 3.13 | EOL as of 2022-11-01 |
+| Debian | 9.x | EOL as of 2022-06-30 |
+| Fedora | 36 | EOL as of 2023-05-18 |
+| Fedora | 35 | EOL as of 2022-12-13 |
+| openSUSE | Leap 15.3 | EOL as of 2022-12-01 |
+| Ubuntu | 22.10 | EOL as of 2023-07-20 |
+| Ubuntu | 21.10 | EOL as of 2022-07-31 |
+| Ubuntu | 18.04 | EOL as of 2023-04-02 |
+
+## Static builds
+
+The Netdata team provides static builds of Netdata for Linux systems with a selection of common CPU
+architectures. These static builds are largely self-contained, only requiring a POSIX-compliant shell on the target
+system to provide their basic functionality. Static builds are built in an Alpine Linux environment using musl. This
+means that they generally do not support non-local username mappings or exotic name resolution configurations.
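+
+As a hedged illustration (the option name and download URL are taken from the installer documentation and shown
+here only as a sketch), a static build can be requested explicitly when installing with the kickstart script:
+
+```bash
+# Sketch: download the kickstart script and ask it to install a static build only
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh
+sh /tmp/netdata-kickstart.sh --static-only
+```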
+
+We currently provide static builds for the following CPU architectures:
+
+- 32-bit x86
+- 64-bit x86
+- ARMv7
+- AArch64
+- POWER8+
+
+## Platform-specific support considerations
+
+### IPMI
+
+Our IPMI collector is based on FreeIPMI. Due to upstream limitations in FreeIPMI, we are unable to support our
+IPMI collector on POWER-based hardware.
diff --git a/packaging/VERSIONING_AND_PUBLIC_API.md b/packaging/VERSIONING_AND_PUBLIC_API.md
new file mode 100644
index 00000000..79c53785
--- /dev/null
+++ b/packaging/VERSIONING_AND_PUBLIC_API.md
@@ -0,0 +1,145 @@
+# Netdata Agent Versioning Policy (DRAFT)
+
+This document outlines how versions are handled for the Netdata Agent. This policy applies to version 2.0.0 of
+the Netdata Agent and newer versions.
+
+## Stable Releases
+
+Versions for stable releases of the Netdata Agent consist of three parts, a major version, a minor version, and
+a patch version, presented like `<major>.<minor>.<patch>`. For example, a version of `1.42.3` has a major version
+of 1, a minor version of 42, and a patch version of 3.
+
+The patch version is incremented when a new stable release is made that only contains bug fixes that do not alter
+the public API in a backwards incompatible manner. Special exceptions may be made for critical security bugs,
+but such exceptions will be prominently noted in the release notes for the versions for which they are made.
+
+The minor version is incremented when a new stable release is made that contains new features and functionality
+that do not alter the strictly defined parts of public API in a backwards incompatible manner. A new minor version
+may have changes to the loosely defined parts of the public API that are not backwards compatible, but unless
+they are critical security fixes they will be announced ahead of time in the release notes for the previous minor
+version. Once a new minor version is published, no new patch releases will be published for previous minor versions
+unless they fix serious bugs.
+
+The major version is incremented when a new stable release is made that alters the strictly defined public API in
+some backwards incompatible manner. Any backwards incompatible changes that will be included in a new major version
+will be announced ahead of time in the release notes for the previous minor version. Once a given major version
+is published, no new minor releases will be published for any prior major version, though new patch releases _may_
+be published for the latest minor release of any prior major version to fix serious bugs.
+
+In most cases, just prior to a new major version being published, a final stable minor release will be published
+for the previous major version, including all non-breaking changes that will be in the new major version. This is
+intended to ensure that users who choose to remain on the previous major version for an extended period of time
+will be as up-to-date as possible.
+
+## Nightly Builds
+
+Versions for nightly builds of the Netdata Agent consist of four parts, a major version, a minor version, a revision
+number, and an optional commit ID, presented like `<major>.<minor>.0-<revision>-<commit>`. For example, a version
+of `1.43.0-11-gb15437502` has a major version of 1, a minor version of 43, a revision of 11, and a commit ID of
+`gb15437502`. A commit ID consists of a lowercase letter `g`, followed by the short commit hash for the corresponding
+commit. If the commit ID is not included, it may be replaced by the word ‘nightly’.
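+
+As a minimal sketch (not part of any Netdata tooling), such a version string can be split into its parts with
+standard shell utilities:
+
+```bash
+# Hypothetical helper: break a nightly version string into its components
+v="1.43.0-11-gb15437502"
+major="$(echo "$v" | cut -d. -f1)"
+minor="$(echo "$v" | cut -d. -f2)"
+revision="$(echo "$v" | cut -d- -f2)"
+commit="$(echo "$v" | cut -d- -f3)"
+echo "major=$major minor=$minor revision=$revision commit=$commit"
+```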
+
+The major and minor version numbers for a nightly build correspond exactly to an associated stable release. A
+given major version of a nightly build has the same compatibility guarantees as it would for a stable release. A
+given minor version of a nightly build will generally include any backwards-incompatible changes to the loosely
+defined public API that will be in the _next_ minor version of the associated stable release.
+
+The revision number indicates the number of commits on the main branch of the Netdata Agent git repository since
+the associated stable release, and the commit ID, if included, should indicate the exact commit hash used for the
+nightly build.
+
+Due to how our release process works, nightly version numbers do not track stable patch releases. For example, if the
+latest stable release is `1.42.4`, the latest nightly version will still show something like `1.42.0-209-nightly`. The
+first nightly build version published after an associated stable release will include all relevant fixes that were
+in that stable release. In addition, in most cases, the last nightly build version published before an associated
+stable patch release will include all relevant fixes that are in that patch release.
+
+Nightly builds are only published on days when changes have actually been committed to the main branch of the
+Netdata Agent git repository.
+
+## Public API
+
+The remainder of this document outlines the public API of the Netdata Agent.
+
+We define two categories of components within the public API:
+
+- Strictly defined components are guaranteed not to change in a backwards incompatible manner without an associated
+ major version bump, and will have impending changes announced in the release notes at least one minor release
+ before they are changed.
+- Loosely defined components are guaranteed not to change in a backwards incompatible manner without an associated
+ minor version bump, and will have impending changes announced in the release notes at least one minor release
+ before they are changed.
+
+There are also a few things we handle specially, which will be noted later in the document.
+
+### Strictly Defined Public API Components
+
+The following aspects of the public API are strictly defined, and are guaranteed not to change in a backwards
+incompatible manner without an associated major version increase, and such changes will be announced in the release
+notes at least one minor release prior to being merged:
+
+- All mandatory build dependencies which are not vendored in the Netdata Agent code. This includes, but is not
+ limited to:
+ - The underlying build system (such as autotools or CMake).
+ - Primary library dependencies (such as libuv).
+ - Any external tooling that is required at build time.
+- The REST API provided by the Netdata Agent’s internal web server, accessible via the `/api` endpoint (see the
+  example after this list). This does not extend to the charts, labels, or other system-specific data returned by
+  some API endpoints.
+- The protocol used for streaming and replicating data between Netdata Agents.
+- The protocol used for communicating with external data collection plugins.
+- The APIs provided by the `python.d.plugin` and `charts.d.plugin` data collection frameworks.
+- The set of optional features supported by the Agent which are provided by default in our pre-built packages. If
+  support for an optional feature is being completely removed from the agent, that is instead covered by the policy
+  for the component that feature is part of.
+
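+As a small illustration of the REST API mentioned above, the following sketch queries basic agent information from
+a local agent (the `/api/v1/info` endpoint is assumed to be available on a default install):
+
+```bash
+# Query basic agent information from the local REST API
+curl -s http://localhost:19999/api/v1/info
+```
+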
+### Loosely Defined Public API Components
+
+The following aspects of the public API are loosely defined. They are guaranteed to not change in a backwards
+incompatible manner without an associated minor version increase, and such changes will be announced in the release
+notes at least one minor release prior to being merged:
+
+- Configuration options in any configuration file normally located under `/etc/netdata` on a typical install,
+ as well as their default values.
+- Environment variables that are interpreted by the Netdata Agent, or by the startup code in our official OCI
+ container images.
+- The exact set of charts provided, including chart families, chart names, and provided metrics.
+- The exact set of supported data collection sources and data export targets.
+- The exact set of system service managers we officially support running the Netdata Agent under.
+- The exact set of alert delivery mechanisms supported by the Netdata Agent.
+- The high-level implementation of the Netdata Agent’s integrated web server.
+- The v0 and v1 dashboard UIs provided through the Netdata Agent’s internal web server.
+
+All loosely defined API components may also change in a backwards incompatible manner if the major version is
+increased. Large scale changes to these components may also warrant a major version increase even if there are no
+backwards incompatible changes to strictly defined public API components.
+
+### Special Cases
+
+The following special exceptions to the public API exist:
+
+- When an internal on-disk file format (such as the dbengine data file format) is changed, the old format is
+ guaranteed to be supported for in-place updates for at least two minor versions after the change happens. The
+ new format is not guaranteed to be backwards compatible.
+- The list of supported platforms is functionally a part of the public API, but our existing [platform support
+ policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md) dictates when and how
+ support for specific platforms is added or removed.
+- The list of components provided as separate packages in our official native packages is considered part of our
+ strictly defined public API, but changes to our packaging that do not alter the functionality of existing installs
+  are considered to be backwards compatible. This means that we may choose to split a plugin out to its own
+ package at any time, but it will remain as a mandatory dependency until at least the next major release.
+- Options and environment variables used by the `kickstart.sh` install script and the `netdata-updater.sh` script
+ are handled separately from regular Netdata Agent versioning. Backwards compatible changes may happen at any
+ time for these, while backwards incompatible changes will have a deprecation period during which the old behavior
+ will be preserved but will issue a warning about the impending change.
+
+### Things Not Covered By The Public API
+
+Any components which are not explicitly listed above as being part of the public API are not part of the public
+API. This includes, but is not limited to:
+
+- Any mandatory build components which are vendored as part of the Netdata sources, such as SQLite3 or libJudy. This
+  extends to both the presence and absence of such components, as well as the exact version being bundled.
+- The exact installation mechanism that will be used on any given system when using our `kickstart.sh` installation
+ script.
+- The exact underlying implementation of any data collection plugin.
+- The exact underlying implementation of any data export mechanism.
diff --git a/packaging/build_package_install_test.sh b/packaging/build_package_install_test.sh
new file mode 100755
index 00000000..e3b3362d
--- /dev/null
+++ b/packaging/build_package_install_test.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+# Fail unless we are at the top-level directory of the netdata git repository
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" = "netdata" ]; then
+ echo "Run as ./packaging/$(basename "$0") from top level directory of netdata git repository"
+ exit 1
+fi
+
+if [ $# -lt 2 ] || [ $# -gt 3 ]; then
+ echo "Usage: ./packaging/$(basename "$0") <distro> <distro_version> [<netdata_version>]"
+ exit 1
+fi
+
+if ! command -v docker > /dev/null; then
+ echo "Docker CLI not found. You need Docker to run this!"
+ exit 2
+fi
+
+DISTRO="$1"
+DISTRO_VERSION="$2"
+# TODO: Auto compute this?
+VERSION="${3:-1.19.0}"
+
+TAG="netdata/netdata:${DISTRO}_${DISTRO_VERSION}"
+
+docker build \
+ -f ./packaging/Dockerfile.packager \
+ --build-arg DISTRO="$DISTRO" \
+ --build-arg DISTRO_VERSION="$DISTRO_VERSION" \
+ --build-arg VERSION="$VERSION" \
+ -t "$TAG" . |
+ tee build.log
diff --git a/packaging/building-native-packages-locally.md b/packaging/building-native-packages-locally.md
new file mode 100644
index 00000000..6ad1d604
--- /dev/null
+++ b/packaging/building-native-packages-locally.md
@@ -0,0 +1,101 @@
+# Build native (DEB/RPM) packages for testing
+
+This document provides instructions for developers who need to build native packages locally for testing.
+
+## Requirements
+
+To build native packages locally, you will need the following:
+
+* A working Docker or Podman host.
+* A local copy of the source tree you want to build from.
+
+## Building the packages
+
+In the root of the source tree from which you want to build, clean up any existing files left over from a previous build
+and then run:
+
+```bash
+docker run -it --rm -e VERSION=0.1 -v $PWD:/netdata netdata/package-builders:<tag>
+```
+
+or
+
+```bash
+podman run -it --rm -e VERSION=0.1 -v $PWD:/netdata netdata/package-builders:<tag>
+```
+
+The `<tag>` should be the lowercase distribution name with no spaces, followed by the
+release of that distribution. For example, `centos7` to build on CentOS 7, or `ubuntu20.04`
+to build on Ubuntu 20.04. Note that we use Rocky Linux for builds on CentOS/RHEL 8 or newer. See
+[netdata/package-builders](https://hub.docker.com/r/netdata/package-builders/tags) for all available tags.
+
+The value passed in the `VERSION` environment variable can be any version number accepted by the type of package
+being built. As a general rule, it needs to start with a digit, and must include a `.` somewhere.
+
+Once it finishes, the built packages can be found under `artifacts/` in the source tree.
+
+If an error is encountered and the build is being run interactively, it will drop to a shell to allow you to
+inspect the state of the container and look at build logs.
+
+### Detailed explanation
+
+The environments used for building our packages are fully self-contained Docker images built from
+[Dockerfiles](https://github.com/netdata/helper-images/tree/master/package-builders). These are published on
+Docker Hub with the image name `netdata/package-builders`, and tagged using the name and version of the distribution
+(with the tag corresponding to the suffix on the associated Dockerfile).
+
+The build code expects the following requirements to be met:
+
+- It expects the source tree it should build from to be located at `/netdata`, and expects that said source tree
+ is clean (no artifacts left over from previous builds).
+- It expects an environment variable named `VERSION` to be defined, and uses this to control what version number
+ will be shown in the package metadata and filenames.
+
+Internally, the source tree is copied to a temporary location for the build, so that it can be mounted directly
+from the host without leaving a dirty tree behind. Any templating or file movements required for the build are then
+performed, the package build command is invoked with the correct arguments, and the resulting packages are copied
+to the `artifacts/` directory in the original source tree so they remain accessible after the container exits.
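+
+Putting these requirements together, a typical local build might look like the following (the tag and version are
+illustrative; pick the tag matching the distribution you want to build for):
+
+```bash
+cd /path/to/netdata            # top of a clean source tree
+git clean -xfd                 # remove any artifacts left over from previous builds
+docker run -it --rm -e VERSION=0.1 -v "$PWD:/netdata" netdata/package-builders:ubuntu22.04
+ls artifacts/                  # the built DEB/RPM packages end up here
+```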
+
+## Finding build logs after a failed build
+
+Build logs and artifacts can be found in the build directory, whose location varies by distribution.
+
+On DEB systems (Ubuntu and Debian), the build directory inside the container is located at `/usr/src/netdata`.
+
+On RPM systems except openSUSE, the build directory inside the container is located under `/root/rpmbuild/BUILD/`
+and varies based on the package version number.
+
+On openSUSE, the build directory inside the container is located under `/usr/src/packages/BUILD` and varies based
+on the package version number.
+
+## Building for other architectures
+
+If you need to test a build for an architecture that does not match your host system, you can do so by setting up
+QEMU user-mode emulation. This requires a Linux kernel with binfmt\_misc support (all modern distributions provide
+this out of the box, though support under WSL or Docker Desktop may vary).
+
+The quick and easy way to do this is to run the following:
+
+```bash
+docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+```
+
+or
+
+```bash
+podman run --rm --privileged multiarch/qemu-user-static --reset -p yes
+```
+
+This will set up the required QEMU user-mode emulation until you reboot. Note that if using Podman, you will need
+to run this as root and not as a rootless container (the package builds work fine in a rootless container though,
+even if doing cross-architecture builds).
+
+Once you have that set up, the command to build the packages is the same as above; you just need to add the correct
+`--platform` option to the `docker run` or `podman run` command. The architectures we currently build for, and the
+corresponding `--platform` values, are listed below (see the example after the list):
+
+- 32-bit ARMv7: `linux/arm/v7`
+- 64-bit ARMv8: `linux/arm64/v8`
+- 32-bit x86: `linux/i386`
+- 64-bit x86: `linux/amd64`
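+
+For example (illustrative values), a 64-bit ARM build of the Ubuntu 22.04 packages could be run like this once the
+QEMU emulation above is registered:
+
+```bash
+docker run -it --rm --platform linux/arm64/v8 -e VERSION=0.1 \
+  -v "$PWD:/netdata" netdata/package-builders:ubuntu22.04
+```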
diff --git a/packaging/bundle-ebpf-co-re.sh b/packaging/bundle-ebpf-co-re.sh
new file mode 100755
index 00000000..460709b6
--- /dev/null
+++ b/packaging/bundle-ebpf-co-re.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
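+# Fetch the pre-built eBPF CO-RE archive pinned in packaging/ebpf-co-re.version, verify it against
+# packaging/ebpf-co-re.checksums, and unpack it into the collectors/ebpf.plugin source directory.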
+
+SRCDIR="${1}"
+
+CORE_VERSION="$(cat "${SRCDIR}/packaging/ebpf-co-re.version")"
+CORE_TARBALL="netdata-ebpf-co-re-glibc-${CORE_VERSION}.tar.xz"
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/ebpf-co-re/releases/download/${CORE_VERSION}/${CORE_TARBALL}" > "${CORE_TARBALL}" || exit 1
+grep "${CORE_TARBALL}" "${SRCDIR}/packaging/ebpf-co-re.checksums" | sha256sum -c - || exit 1
+tar -xa --no-same-owner -f "${CORE_TARBALL}" -C "${SRCDIR}/collectors/ebpf.plugin" || exit 1
diff --git a/packaging/bundle-ebpf.sh b/packaging/bundle-ebpf.sh
new file mode 100755
index 00000000..11930671
--- /dev/null
+++ b/packaging/bundle-ebpf.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
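+# Fetch the pre-built eBPF kernel-collector archive pinned in packaging/ebpf.version, verify it against
+# packaging/ebpf.checksums, and copy its contents into the plugin directory's ebpf.d/ (only when an
+# ebpf.plugin binary is already installed there, or when "force" is passed as the third argument).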
+
+SRCDIR="${1}"
+PLUGINDIR="${2}"
+FORCE="${3}"
+
+EBPF_VERSION="$(cat "${SRCDIR}/packaging/ebpf.version")"
+EBPF_TARBALL="netdata-kernel-collector-glibc-${EBPF_VERSION}.tar.xz"
+
+if [ -x "${PLUGINDIR}/ebpf.plugin" ] || [ "${FORCE}" = "force" ]; then
+ mkdir -p "${SRCDIR}/tmp/ebpf"
+ curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1
+ grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1
+ tar -xva --no-same-owner -f "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1
+ if [ ! -d "${PLUGINDIR}/ebpf.d" ];then
+ mkdir "${PLUGINDIR}/ebpf.d"
+ fi
+ # shellcheck disable=SC2046
+ cp -r $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d"
+fi
diff --git a/packaging/bundle-libbpf.sh b/packaging/bundle-libbpf.sh
new file mode 100755
index 00000000..52f7cf45
--- /dev/null
+++ b/packaging/bundle-libbpf.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
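+# Select the libbpf version to bundle (an older 0.0.9 release for CentOS 7, the current pinned release otherwise),
+# download and checksum-verify it, build it as a static library, and stage the result under externaldeps/libbpf.
+# The first argument is the source tree root, the second is the target distribution.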
+
+if [ "$(uname -m)" = x86_64 ]; then
+ lib_subdir="lib64"
+else
+ lib_subdir="lib"
+fi
+
+if [ "${2}" != "centos7" ]; then
+ cp "${1}/packaging/current_libbpf.checksums" "${1}/packaging/libbpf.checksums"
+ cp "${1}/packaging/current_libbpf.version" "${1}/packaging/libbpf.version"
+else
+ cp "${1}/packaging/libbpf_0_0_9.checksums" "${1}/packaging/libbpf.checksums"
+ cp "${1}/packaging/libbpf_0_0_9.version" "${1}/packaging/libbpf.version"
+fi
+
+LIBBPF_TARBALL="v$(cat "${1}/packaging/libbpf.version").tar.gz"
+LIBBPF_BUILD_PATH="${1}/externaldeps/libbpf/libbpf-$(cat "${1}/packaging/libbpf.version")"
+
+mkdir -p "${1}/externaldeps/libbpf" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libbpf/archive/${LIBBPF_TARBALL}" > "${LIBBPF_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/libbpf.checksums" || exit 1
+tar -xz --no-same-owner -f "${LIBBPF_TARBALL}" -C "${1}/externaldeps/libbpf" || exit 1
+make -C "${LIBBPF_BUILD_PATH}/src" BUILD_STATIC_ONLY=1 OBJDIR=build/ DESTDIR=../ install || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/usr/${lib_subdir}/libbpf.a" "${1}/externaldeps/libbpf" || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/usr/include" "${1}/externaldeps/libbpf" || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/include/uapi" "${1}/externaldeps/libbpf/include" || exit 1
diff --git a/packaging/bundle-protobuf.sh b/packaging/bundle-protobuf.sh
new file mode 100755
index 00000000..d715dfe3
--- /dev/null
+++ b/packaging/bundle-protobuf.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
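+# Download the protobuf release pinned in packaging/protobuf.version, verify it against packaging/protobuf.checksums,
+# build a static copy (with PIC, without zlib) under externaldeps/protobuf, and copy the resulting src/ directory
+# into place there. The first argument is the source tree root.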
+
+PROTOBUF_TARBALL="protobuf-cpp-$(cat "${1}/packaging/protobuf.version").tar.gz"
+PROTOBUF_BUILD_PATH="${1}/externaldeps/protobuf/protobuf-$(cat "${1}/packaging/protobuf.version")"
+
+mkdir -p "${1}/externaldeps/protobuf" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/protocolbuffers/protobuf/releases/download/v$(cat "${1}/packaging/protobuf.version")/${PROTOBUF_TARBALL}" > "${PROTOBUF_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/protobuf.checksums" || exit 1
+tar -xz --no-same-owner -f "${PROTOBUF_TARBALL}" -C "${1}/externaldeps/protobuf" || exit 1
+OLDPWD="${PWD}"
+cd "${PROTOBUF_BUILD_PATH}" || exit 1
+./configure --disable-shared --without-zlib --disable-dependency-tracking --with-pic || exit 1
+make -j "$(nproc)" || exit 1
+cd "${OLDPWD}" || exit 1
+
+cp -a "${PROTOBUF_BUILD_PATH}/src" "${1}/externaldeps/protobuf" || exit 1
diff --git a/packaging/check-kernel-config.sh b/packaging/check-kernel-config.sh
new file mode 100755
index 00000000..515259c1
--- /dev/null
+++ b/packaging/check-kernel-config.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
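+# Check whether the running kernel appears to provide the configuration options (kprobes and BPF support)
+# needed for eBPF monitoring, exiting non-zero if a required option is missing.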
+
+get_kernel_version() {
+ r="$(uname -r | cut -f 1 -d '-')"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
+
+get_rh_version() {
+ if [ ! -f /etc/redhat-release ]; then
+ printf "000000000"
+ return
+ fi
+
+ r="$(cut -f 4 -d ' ' < /etc/redhat-release)"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
+
+if [ "$(uname -s)" != "Linux" ]; then
+ echo >&2 "This does not appear to be a Linux system."
+ exit 1
+fi
+
+KERNEL_VERSION="$(uname -r)"
+
+if [ "$(get_kernel_version)" -lt 004014000 ] && [ "$(get_rh_version)" -lt 0070061810 ]; then
+ echo >&2 "WARNING: Your kernel appears to be older than 4.11 or you are using RH version older than 7.6.1810. This may still work in some cases, but probably won't."
+fi
+
+CONFIG_PATH=""
+MODULE_LOADED=""
+
+if modprobe configs 2> /dev/null; then
+ MODULE_LOADED=1
+fi
+
+if [ -r /proc/config.gz ]; then
+ CONFIG_PATH="/proc/config.gz"
+elif [ -r "/lib/modules/${KERNEL_VERSION}/source/.config" ]; then
+ CONFIG_PATH="/lib/modules/${KERNEL_VERSION}/source/.config"
+elif [ -r "/lib/modules/${KERNEL_VERSION}.x86_64/source/.config" ]; then
+ CONFIG_PATH="/lib/modules/${KERNEL_VERSION}.x86_64/source/.config"
+elif [ -n "$(find /boot -name "config-${KERNEL_VERSION}*")" ]; then
+ CONFIG_PATH="$(find /boot -name "config-${KERNEL_VERSION}*" | head -n 1)"
+fi
+
+if [ -n "${CONFIG_PATH}" ]; then
+ GREP='grep'
+ CAT='cat'
+
+ if echo "${CONFIG_PATH}" | grep -q '.gz'; then
+ CAT='zcat'
+ fi
+
+ REQUIRED_CONFIG="KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT"
+
+ for required_config in ${REQUIRED_CONFIG}; do
+ # Fix issue https://github.com/netdata/netdata/issues/14668
+ # if ! "${GREP}" -q "CONFIG_${required_config}=y" "${CONFIG_PATH}"; then
+ if ! { "${CAT}" "${CONFIG_PATH}" | "${GREP}" -q "CONFIG_${required_config}=y" >&2 >/dev/null; } ;then
+ echo >&2 " Missing Kernel Config: ${required_config}"
+ exit 1
+ fi
+ done
+fi
+
+if [ -n "${MODULE_LOADED}" ]; then
+ modprobe -r configs 2> /dev/null || true # Ignore failures from CONFIGS being builtin
+fi
+
+exit 0
diff --git a/packaging/current_libbpf.checksums b/packaging/current_libbpf.checksums
new file mode 100644
index 00000000..8279c1dd
--- /dev/null
+++ b/packaging/current_libbpf.checksums
@@ -0,0 +1 @@
+05e4ccdd3bc8532290eebc37b37455b253071244d30e42412a7071d89221f1c8 v1.2.2p_netdata.tar.gz
diff --git a/packaging/current_libbpf.version b/packaging/current_libbpf.version
new file mode 100644
index 00000000..b5dc2d8b
--- /dev/null
+++ b/packaging/current_libbpf.version
@@ -0,0 +1 @@
+1.2.2p_netdata
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
new file mode 100644
index 00000000..8e7c9a7b
--- /dev/null
+++ b/packaging/docker/Dockerfile
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+# author : paulfantom
+
+# This image contains preinstalled dependencies
+# hadolint ignore=DL3007
+FROM netdata/builder:v2 as builder
+
+# One of 'nightly' or 'stable'
+ARG RELEASE_CHANNEL=nightly
+
+ARG CFLAGS
+
+ENV CFLAGS=$CFLAGS
+
+ARG EXTRA_INSTALL_OPTS
+
+ENV EXTRA_INSTALL_OPTS=$EXTRA_INSTALL_OPTS
+
+ARG DEBUG_BUILD
+
+ENV DEBUG_BUILD=$DEBUG_BUILD
+
+# Copy source
+COPY . /opt/netdata.git
+WORKDIR /opt/netdata.git
+
+# Install from source
+RUN chmod +x netdata-installer.sh && \
+ cp -rp /deps/* /usr/local/ && \
+ /bin/echo -e "INSTALL_TYPE='oci'\nPREBUILT_ARCH='$(uname -m)'" > ./system/.install-type && \
+ CFLAGS="$(packaging/docker/gen-cflags.sh)" LDFLAGS="-Wl,--gc-sections" ./netdata-installer.sh --dont-wait --dont-start-it --use-system-protobuf \
+ ${EXTRA_INSTALL_OPTS} --disable-ebpf --one-time-build --enable-lto "$([ "$RELEASE_CHANNEL" = stable ] && echo --stable-channel)"
+
+# Gather the installed files into one directory
+RUN mkdir -p /app/usr/sbin/ \
+ /app/usr/share \
+ /app/usr/libexec \
+ /app/usr/local \
+ /app/usr/lib \
+ /app/var/cache \
+ /app/var/lib \
+ /app/etc && \
+ mv /usr/share/netdata /app/usr/share/ && \
+ mv /usr/libexec/netdata /app/usr/libexec/ && \
+ mv /usr/lib/netdata /app/usr/lib/ && \
+ mv /var/cache/netdata /app/var/cache/ && \
+ mv /var/lib/netdata /app/var/lib/ && \
+ mv /etc/netdata /app/etc/ && \
+ mv /usr/sbin/netdata /app/usr/sbin/ && \
+ mv /usr/sbin/netdata-claim.sh /app/usr/sbin/ && \
+ mv /usr/sbin/netdatacli /app/usr/sbin/ && \
+ mv /usr/sbin/systemd-cat-native /app/usr/sbin/ && \
+ mv packaging/docker/run.sh /app/usr/sbin/ && \
+ mv packaging/docker/health.sh /app/usr/sbin/ && \
+ mkdir -p /deps/etc && \
+ cp -rp /deps/etc /app/usr/local/etc && \
+ chmod -R o+rX /app && \
+ chmod +x /app/usr/sbin/run.sh
+
+#####################################################################
+# This image contains preinstalled dependencies
+# hadolint ignore=DL3007
+FROM netdata/base:v2 as base
+
+LABEL org.opencontainers.image.authors="Netdatabot <bot@netdata.cloud>"
+LABEL org.opencontainers.image.url="https://netdata.cloud"
+LABEL org.opencontainers.image.documentation="https://learn.netdata.cloud"
+LABEL org.opencontainers.image.source="https://github.com/netdata/netdata"
+LABEL org.opencontainers.image.title="Netdata Agent"
+LABEL org.opencontainers.image.description="Official Netdata Agent Docker Image"
+LABEL org.opencontainers.image.vendor="Netdata Inc."
+
+ARG OFFICIAL_IMAGE=false
+ENV NETDATA_OFFICIAL_IMAGE=$OFFICIAL_IMAGE
+
+ONBUILD ENV NETDATA_OFFICIAL_IMAGE=false
+
+ARG NETDATA_UID=201
+ARG NETDATA_GID=201
+ENV DOCKER_GRP netdata
+ENV DOCKER_USR netdata
+# If DISABLE_TELEMETRY is set, it will disable anonymous stats collection and reporting
+#ENV DISABLE_TELEMETRY=1
+ENV NETDATA_LISTENER_PORT 19999
+EXPOSE $NETDATA_LISTENER_PORT
+
+ENV NETDATA_EXTRA_DEB_PACKAGES=""
+
+RUN mkdir -p /opt/src /var/log/netdata && \
+ ln -sf /dev/stdout /var/log/netdata/access.log && \
+ ln -sf /dev/stdout /var/log/netdata/aclk.log && \
+ ln -sf /dev/stdout /var/log/netdata/debug.log && \
+ ln -sf /dev/stderr /var/log/netdata/error.log && \
+ ln -sf /dev/stderr /var/log/netdata/daemon.log && \
+ ln -sf /dev/stdout /var/log/netdata/collector.log && \
+ ln -sf /dev/stdout /var/log/netdata/fluentbit.log && \
+ ln -sf /dev/stdout /var/log/netdata/health.log
+
+COPY --from=builder /app /
+
+# Create netdata user and apply the permissions as described in
+# https://docs.netdata.cloud/docs/netdata-security/#netdata-directories, but own everything by root group due to https://github.com/netdata/netdata/pull/6543
+# hadolint ignore=DL3013
+RUN addgroup --gid ${NETDATA_GID} --system "${DOCKER_GRP}" && \
+ adduser --system --no-create-home --shell /usr/sbin/nologin --uid ${NETDATA_UID} --home /etc/netdata --group "${DOCKER_USR}" && \
+ chown -R root:root \
+ /etc/netdata \
+ /usr/share/netdata \
+ /usr/libexec/netdata && \
+ chown -R netdata:root \
+ /usr/lib/netdata \
+ /var/cache/netdata \
+ /var/lib/netdata \
+ /var/log/netdata && \
+ chown -R netdata:netdata /var/lib/netdata/cloud.d && \
+ chmod 0700 /var/lib/netdata/cloud.d && \
+ chmod 0755 /usr/libexec/netdata/plugins.d/*.plugin && \
+ for name in cgroup-network \
+ local-listeners \
+ apps.plugin \
+ debugfs.plugin \
+ freeipmi.plugin \
+ go.d.plugin \
+ perf.plugin \
+ slabinfo.plugin \
+ systemd-journal.plugin; do \
+ [ -f "/usr/libexec/netdata/plugins.d/$name" ] && chmod 4755 "/usr/libexec/netdata/plugins.d/$name"; \
+ done && \
+ # Group write permissions due to: https://github.com/netdata/netdata/pull/6543
+ find /var/lib/netdata /var/cache/netdata -type d -exec chmod 0770 {} \; && \
+ find /var/lib/netdata /var/cache/netdata -type f -exec chmod 0660 {} \; && \
+ cp -va /etc/netdata /etc/netdata.stock
+
+ENTRYPOINT ["/usr/sbin/run.sh"]
+
+HEALTHCHECK --interval=60s --timeout=10s --retries=3 CMD /usr/sbin/health.sh
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
new file mode 100644
index 00000000..528ef592
--- /dev/null
+++ b/packaging/docker/README.md
@@ -0,0 +1,529 @@
+<!--
+title: "Install Netdata with Docker"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/docker/README.md"
+sidebar_label: "Docker"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 40
+-->
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Install Netdata with Docker
+
+## Create a new Netdata Agent container
+
+You can create a new Agent container using either `docker run` or `docker-compose`. With either method, you can then
+visit the Agent dashboard at `http://NODE:19999`.
+
+The Netdata container requires different privileges and mounts to provide functionality similar to that provided by
+Netdata installed on the host. Below you can find a list of Netdata components that need these privileges and mounts,
+along with their descriptions.
+
+<details open>
+<summary>Privileges</summary>
+
+| Component | Privileges | Description |
+|:---------------:|:-----------------------------:|--------------------------------------------------------------------------------------------------------------------------|
+| cgroups.plugin | host PID mode, SYS_ADMIN | Container network interfaces monitoring. Map virtual interfaces in the system namespace to interfaces inside containers. |
+| proc.plugin | host network mode | Host system networking stack monitoring. |
+| go.d.plugin | host network mode | Monitoring applications running on the host and inside containers. |
+| local-listeners | host network mode, SYS_PTRACE | Discovering local services/applications. Map open (listening) ports to running services/applications. |
+
+</details>
+
+<details open>
+<summary>Mounts</summary>
+
+| Component | Mounts | Description |
+|:----------------------:|:--------------------------:|--------------------------------------------------------------------------------------------------------------------------------------------|
+| netdata | /etc/os-release | Host info detection. |
+| cgroups.plugin | /sys, /var/run/docker.sock | Docker containers monitoring and name resolution. |
+| go.d.plugin | /var/run/docker.sock | Docker Engine and containers monitoring. See [docker](https://github.com/netdata/go.d.plugin/tree/master/modules/docker#readme) collector. |
+| go.d.plugin | /var/log | Web servers logs tailing. See [weblog](https://github.com/netdata/go.d.plugin/tree/master/modules/weblog#readme) collector. |
+| apps.plugin | /etc/passwd, /etc/group | Monitoring of host system resource usage by each user and user group. |
+| proc.plugin | /proc | Host system monitoring (CPU, memory, network interfaces, disks, etc.). |
+| systemd-journal.plugin | /var/log | Viewing, exploring and analyzing systemd journal logs. |
+
+</details>
+
+### Recommended way
+
+Both methods create a [volume](https://docs.docker.com/storage/volumes/) for Netdata's configuration files
+_within the container_ at `/etc/netdata`.
+See the [configure section](#configure-agent-containers) for details. If you want to access the configuration files from
+your _host_ machine, see [host-editable configuration](#with-host-editable-configuration).
+
+<Tabs>
+<TabItem value="docker_run" label="docker run">
+
+<h3> Using the <code>docker run</code> command </h3>
+
+Run the following command in your terminal to start a new container.
+
+```bash
+docker run -d --name=netdata \
+ --pid=host \
+ --network=host \
+ -v netdataconfig:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ -v /var/log:/host/var/log:ro \
+ -v /var/run/docker.sock:/var/run/docker.sock:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --cap-add SYS_ADMIN \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+</TabItem>
+<TabItem value="docker compose" label="docker-compose">
+
+<h3> Using the <code>docker-compose</code> command</h3>
+
+Create a file named `docker-compose.yml` in your project directory and paste the code below. From your project
+directory, start Netdata by running `docker-compose up -d`.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ pid: host
+ network_mode: host
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ - /var/log:/host/var/log:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+ netdataconfig:
+ netdatalib:
+ netdatacache:
+```
+
+</TabItem>
+</Tabs>
+
+> :bookmark_tabs: Note
+>
+> If you plan to claim the node to Netdata Cloud, you can find the command with the right parameters by clicking the
+> "Add Nodes" button in your Space's "Nodes" view.
+
+### With systemd units monitoring
+
+Monitoring systemd units requires mounting `/run/dbus`. This mount is not available on non-systemd systems, so it is
+not included in the recommended setup above.
+
+Mounting `/run/dbus` provides:
+
+- [go.d/systemdunits](https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits#readme).
+- Systemd-list-units function: information about all systemd units, including their active state, description, whether
+ they are enabled, and more.
+
+<Tabs>
+<TabItem value="docker_run" label="docker run">
+
+<h3> Using the <code>docker run</code> command </h3>
+
+Add `-v /run/dbus:/run/dbus:ro` to your `docker run`.
+
+</TabItem>
+<TabItem value="docker compose" label="docker-compose">
+
+<h3> Using the <code>docker-compose</code> command</h3>
+
+Add `- /run/dbus:/run/dbus:ro` to the netdata service `volumes`.
+
+</TabItem>
+</Tabs>
+
+### With host-editable configuration
+
+Use a [bind mount](https://docs.docker.com/storage/bind-mounts/) for `/etc/netdata` rather than a volume.
+
+This example assumes that you have created `netdataconfig/` in your current working directory.
+
+```bash
+mkdir netdataconfig
+```
+
+<Tabs>
+<TabItem value="docker_run" label="docker run">
+
+<h3> Using the <code>docker run</code> command </h3>
+
+Run the following command in your terminal to start a new container.
+
+```bash
+docker run -d --name=netdata \
+ --pid=host \
+ --network=host \
+ -v $(pwd)/netdataconfig/netdata:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ -v /var/log:/host/var/log:ro \
+ -v /var/run/docker.sock:/var/run/docker.sock:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --cap-add SYS_ADMIN \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+</TabItem>
+<TabItem value="docker compose" label="docker-compose">
+
+<h3> Using the <code>docker-compose</code> command</h3>
+
+Create a file named `docker-compose.yml` in your project directory and paste the code below. From your project
+directory, start Netdata by running `docker-compose up -d`.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ pid: host
+ network_mode: host
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - ./netdataconfig/netdata:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ - /var/log:/host/var/log:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+ netdatalib:
+ netdatacache:
+```
+
+</TabItem>
+</Tabs>
+
+### With SSL/TLS enabled HTTP Proxy
+
+For a permanent installation on a public server, you
+should [secure the Netdata instance](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md). This
+section contains an example of how to install Netdata with an SSL reverse proxy and basic authentication.
+
+You can use the following `docker-compose.yml` and Caddyfile files to run Netdata with Docker. Replace the domains and
+email address for [Let's Encrypt](https://letsencrypt.org/) before starting.
+
+#### Caddyfile
+
+This file needs to be placed in `/opt` with the name `Caddyfile`. Here you customize your domain, and you need to
+provide your email address to obtain a Let's Encrypt certificate. Certificate renewal happens automatically and is
+handled internally by the Caddy server.
+
+```caddyfile
+netdata.example.org {
+ reverse_proxy netdata:19999
+ tls admin@example.org
+}
+```
+
+#### docker-compose.yml
+
+After setting up the Caddyfile, run `docker-compose up -d` to get a fully functioning Netdata setup behind an HTTP
+reverse proxy.
+
+```yaml
+version: '3'
+services:
+ caddy:
+ image: caddy:2
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - /opt/Caddyfile:/etc/caddy/Caddyfile
+ - caddy_data:/data
+ - caddy_config:/config
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: example.com # set to fqdn of host
+ restart: always
+ pid: host
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ - /var/log:/host/var/log:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+volumes:
+ caddy_data:
+ caddy_config:
+ netdatalib:
+ netdatacache:
+```
+
+#### Restrict access with basic auth
+
+You can restrict access by following
+the [official caddy guide](https://caddyserver.com/docs/caddyfile/directives/basicauth#basicauth) and adding the
+relevant directives to your Caddyfile.
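+
+For example, a minimal sketch (the username and password handling follow the linked guide; the password below is a
+placeholder): generate a bcrypt hash of your chosen password with Caddy's `hash-password` command, then add a
+`basicauth` block containing your username and the resulting hash to the site definition in your Caddyfile.
+
+```bash
+# Generate a bcrypt hash to paste into the Caddyfile's basicauth block.
+docker run --rm caddy:2 caddy hash-password --plaintext 'replace-with-your-password'
+```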
+
+### With Docker socket proxy
+
+Deploy a Docker socket proxy that accepts and filters out requests using something like
+[HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md) or
+[CetusGuard](https://github.com/hectorm/cetusguard) so that it restricts connections to read-only access to
+the `/containers` endpoint.
+
+Exposing the socket to the proxy is safer because Netdata exposes a TCP port outside the Docker network, while
+access to the proxy container is limited to the Docker network itself.
+
+#### HAProxy
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ pid: host
+ network_mode: host
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ - /var/log:/host/var/log:ro
+ environment:
+ - DOCKER_HOST=localhost:2375
+ proxy:
+ network_mode: host
+ image: tecnativa/docker-socket-proxy
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ - CONTAINERS=1
+
+volumes:
+ netdataconfig:
+ netdatalib:
+ netdatacache:
+```
+
+**Note:** Replace `2375` with the port of your proxy.
+
+#### CetusGuard
+
+> Note: This deployment method is supported by the community
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ pid: host
+ network_mode: host
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ - /var/log:/host/var/log:ro
+ environment:
+ - DOCKER_HOST=localhost:2375
+ cetusguard:
+ image: hectorm/cetusguard:v1
+ network_mode: host
+ read_only: true
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ CETUSGUARD_BACKEND_ADDR: unix:///var/run/docker.sock
+ CETUSGUARD_FRONTEND_ADDR: tcp://:2375
+ CETUSGUARD_RULES: |
+ ! Inspect a container
+ GET %API_PREFIX_CONTAINERS%/%CONTAINER_ID_OR_NAME%/json
+
+volumes:
+ netdataconfig:
+ netdatalib:
+ netdatacache:
+```
+
+You can run the socket proxy in its own Docker Compose file and leave it on a private network that you can add to
+other services that require access.
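+
+For instance, a minimal sketch of this approach using plain `docker` commands (the network and container names are
+illustrative) could look like this:
+
+```bash
+# Create a private network and run the socket proxy attached to it.
+docker network create docker-socket-proxy-net
+docker run -d --name docker-socket-proxy \
+  --network docker-socket-proxy-net \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
+  -e CONTAINERS=1 \
+  tecnativa/docker-socket-proxy
+# Attach any service that needs read-only Docker API access to the same network
+# and point it at tcp://docker-socket-proxy:2375.
+```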
+
+## Docker tags
+
+See our full list of Docker images at [Docker Hub](https://hub.docker.com/r/netdata/netdata).
+
+The official `netdata/netdata` Docker image provides the following named tags:
+
+| Tag | Description |
+|:--------:|---------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `stable` | the most recently published stable build. |
+| `edge` | the most recently published nightly build. In most cases, this is updated daily at around 01:00 UTC. |
+| `latest` | the most recently published build, whether it’s a stable build or a nightly build. This is what Docker will use by default if you do not specify a tag. |
+| `vX.Y.Z` | the full version of the release (for example, `v1.40.0`). |
+| `vX.Y` | the major and minor version (for example, `v1.40`). |
+| `vX` | just the major version (for example, `v1`). |
+
+The tags for minor and major versions are updated whenever a release that matches this tag is published (for example,
+if `v1.40.1` were to be published, the `v1.40` tag would be updated to it instead of pointing to `v1.40.0`).
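+
+For example, to pull an image pinned to the stable channel or to a specific major version instead of `latest`:
+
+```bash
+docker pull netdata/netdata:stable
+docker pull netdata/netdata:v1
+```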
+
+## Configure Agent containers
+
+If you started an Agent container using one of the [recommended methods](#create-a-new-netdata-agent-container), and you
+want to edit Netdata's configuration, you must first use `docker exec` to attach to the container. Replace `netdata`
+with the name of your container.
+
+```bash
+docker exec -it netdata bash
+cd /etc/netdata
+./edit-config netdata.conf
+```
+
+You need to restart the Agent to apply changes. Exit the container if you haven't already, then use the `docker` command
+to restart the container: `docker restart netdata`.
+
+### Change the default hostname
+
+You can change the hostname of a Docker container, and thus the name that appears in the local dashboard and in Netdata
+Cloud, when creating a new container. If you want to change the hostname of a Netdata container _after_ you started it,
+you can safely stop and remove it. Your configuration and metrics data reside in persistent volumes and are reattached
+to the recreated container.
+
+If you use `docker run`, add the `--hostname` option to your `docker run` command.
+
+```bash
+docker run -d --name=netdata \
+  --hostname=my_docker_netdata \
+  netdata/netdata
+```
+
+If you use `docker-compose`, add a `hostname:` key/value pair into your `docker-compose.yml` file, then create the
+container again using `docker-compose up -d`.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: my_docker_compose_netdata
+```
+
+If you don't want to destroy and recreate your container, you can edit the Agent's `netdata.conf` file directly. See the
+above section on [configuring Agent containers](#configure-agent-containers) to find the appropriate method based on
+how you created the container.
+
+Alternatively, you can directly use the hostname from the node running the container by mounting `/etc/hostname` from
+the host in the container. With `docker run`, this can be done by adding `--volume /etc/hostname:/host/etc/hostname:ro` to
+the options. If you are using Docker Compose, you can add an entry to the container's `volumes` section
+reading `- /etc/hostname:/host/etc/hostname:ro`.
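+
+For example, a minimal sketch of the `docker run` variant (the other recommended options from earlier in this
+document are omitted for brevity):
+
+```bash
+docker run -d --name=netdata \
+  --volume /etc/hostname:/host/etc/hostname:ro \
+  netdata/netdata
+```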
+
+## Adding extra packages at runtime
+
+By default, the official Netdata container images do not include a number of optional runtime dependencies. You
+can add these dependencies, or any other APT packages, at runtime by listing them in the environment variable
+`NETDATA_EXTRA_DEB_PACKAGES`, as shown in the example after the list below.
+
+Commonly useful packages include:
+
+- `apcupsd`: For monitoring APC UPS devices.
+- `lm-sensors`: For monitoring hardware sensors.
+- `netcat-openbsd`: For IRC alert support.
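+
+For example, a minimal sketch that installs `lm-sensors` and `netcat-openbsd` when the container starts (the other
+recommended options from earlier in this document are omitted for brevity):
+
+```bash
+docker run -d --name=netdata \
+  -e NETDATA_EXTRA_DEB_PACKAGES="lm-sensors netcat-openbsd" \
+  netdata/netdata
+```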
+
+## Health Checks
+
+Our Docker image provides integrated support for health checks through the standard Docker interfaces.
+
+You can control how the health checks run by using the environment variable `NETDATA_HEALTHCHECK_TARGET` as follows:
+
+- If left unset, the health check will attempt to access the `/api/v1/info` endpoint of the agent.
+- If set to the exact value 'cli', the health check script will use `netdatacli ping` to determine if the agent is
+ running correctly or not. This is sufficient to ensure that Netdata did not hang during startup, but does not provide
+ a rigorous verification that the daemon is collecting data or is otherwise usable.
+- If set to anything else, the health check will treat the value as a URL to check for a 200 status code on. In most
+ cases, this should start with `http://localhost:19999/` to check the agent running in the container.
+
+In most cases, the default behavior of checking the `/api/v1/info` endpoint will be sufficient. If you are using a
+configuration which disables the web server or restricts access to certain APIs, you will need to use a non-default
+configuration for health checks to work.
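+
+For example, a minimal sketch that switches the health check to `netdatacli ping` (the other recommended options
+from earlier in this document are omitted for brevity):
+
+```bash
+docker run -d --name=netdata \
+  -e NETDATA_HEALTHCHECK_TARGET=cli \
+  netdata/netdata
+```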
+
+## Publish a test image to your own repository
+
+At Netdata, we provide multiple ways of testing your Docker images using your own repositories.
+You may either use the command line tools available or take advantage of our GitHub Actions infrastructure.
diff --git a/packaging/docker/gen-cflags.sh b/packaging/docker/gen-cflags.sh
new file mode 100755
index 00000000..f5ccab8a
--- /dev/null
+++ b/packaging/docker/gen-cflags.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+if [ -n "${CFLAGS}" ]; then
+ echo "${CFLAGS}"
+elif [ -n "${DEBUG_BUILD}" ]; then
+ echo "-ffunction-sections -fdata-sections -Og -ggdb -pipe"
+else
+ echo "-ffunction-sections -fdata-sections -O2 -funroll-loops -pipe"
+fi
diff --git a/packaging/docker/health.sh b/packaging/docker/health.sh
new file mode 100755
index 00000000..00adf97a
--- /dev/null
+++ b/packaging/docker/health.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# This is the script that gets run for our Docker image health checks.
+
+if [ -z "${NETDATA_HEALTHCHECK_TARGET}" ] ; then
+ # If users didn't request something else, query `/api/v1/info`.
+ PORT=${NETDATA_LISTENER_PORT:-19999}
+ NETDATA_HEALTHCHECK_TARGET="http://localhost:${PORT}/api/v1/info"
+fi
+
+case "${NETDATA_HEALTHCHECK_TARGET}" in
+ cli)
+ netdatacli ping || exit 1
+ ;;
+ *)
+ curl -sSL "${NETDATA_HEALTHCHECK_TARGET}" || exit 1
+ ;;
+esac
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
new file mode 100755
index 00000000..41557332
--- /dev/null
+++ b/packaging/docker/run.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+#
+# Entry point script for netdata
+#
+# Copyright: 2018 and later Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+# Author : Austin S. Hemmelgarn <austin@netdata.cloud>
+set -e
+
+if [ ! -w / ] && [ "${EUID}" -eq 0 ]; then
+ echo >&2 "WARNING: This Docker host appears to not properly support newer stat system calls. This is known to cause issues with Netdata (most notably, nodes running on such hosts **cannot be claimed**)."
+ echo >&2 "WARNING: For more information, see https://learn.netdata.cloud/docs/agent/claim#known-issues-on-older-hosts-with-seccomp-enabled"
+fi
+
+if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
+ [ -n "$DISABLE_TELEMETRY" ] ||
+ [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] ||
+ [ -n "$DO_NOT_TRACK" ]; then
+ touch /etc/netdata/.opt-out-from-anonymous-statistics
+fi
+
+chmod o+rX / 2>/dev/null || echo "Unable to change permissions without errors."
+
+BALENA_PGID=$(stat -c %g /var/run/balena.sock 2>/dev/null || true)
+DOCKER_PGID=$(stat -c %g /var/run/docker.sock 2>/dev/null || true)
+
+re='^[0-9]+$'
+if [[ $BALENA_PGID =~ $re ]]; then
+ echo "Netdata detected balena-engine.sock"
+ DOCKER_HOST='/var/run/balena-engine.sock'
+ PGID="$BALENA_PGID"
+elif [[ $DOCKER_PGID =~ $re ]]; then
+ echo "Netdata detected docker.sock"
+ DOCKER_HOST="/var/run/docker.sock"
+ PGID="$DOCKER_PGID"
+fi
+export PGID
+export DOCKER_HOST
+
+if [ -n "${PGID}" ]; then
+ echo "Creating docker group ${PGID}"
+  addgroup --gid "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}; it probably already exists"
+  echo "Assigning netdata user to docker group ${PGID}"
+ usermod --append --groups "docker" "${DOCKER_USR}" || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
+fi
+
+# Needed to read Proxmox VMs and (LXC) containers configuration files (name resolution + CPU and memory limits)
+function add_netdata_to_proxmox_conf_files_group() {
+ group_guid="$(stat -c %g /host/etc/pve 2>/dev/null || true)"
+ [ -z "${group_guid}" ] && return
+
+ if ! getent group "${group_guid}" >/dev/null; then
+ echo "Creating proxmox-etc-pve group with GID ${group_guid}"
+ if ! addgroup -g "${group_guid}" "proxmox-etc-pve"; then
+ echo >&2 "Failed to add group proxmox-etc-pve with GID ${group_guid}."
+ return
+ fi
+ fi
+
+ if ! getent group "${group_guid}" | grep -q netdata; then
+ echo "Assign netdata user to group ${group_guid}"
+ if ! usermod -a -G "${group_guid}" "${DOCKER_USR}"; then
+ echo >&2 "Failed to add netdata user to group with GID ${group_guid}."
+ return
+ fi
+ fi
+}
+
+if [ -d "/host/etc/pve" ]; then
+ add_netdata_to_proxmox_conf_files_group || true
+fi
+
+
+if mountpoint -q /etc/netdata; then
+ echo "Copying stock configuration to /etc/netdata"
+ cp -an /etc/netdata.stock/* /etc/netdata
+ cp -an /etc/netdata.stock/.[^.]* /etc/netdata
+fi
+
+if [ -w "/etc/netdata" ]; then
+ if mountpoint -q /etc/netdata; then
+ hostname >/etc/netdata/.container-hostname
+ else
+ rm -f /etc/netdata/.container-hostname
+ fi
+fi
+
+if [ -n "${NETDATA_CLAIM_URL}" ] && [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ ! -f /var/lib/netdata/cloud.d/claimed_id ]; then
+ # shellcheck disable=SC2086
+ /usr/sbin/netdata-claim.sh -token="${NETDATA_CLAIM_TOKEN}" \
+ -url="${NETDATA_CLAIM_URL}" \
+ ${NETDATA_CLAIM_ROOMS:+-rooms="${NETDATA_CLAIM_ROOMS}"} \
+ ${NETDATA_CLAIM_PROXY:+-proxy="${NETDATA_CLAIM_PROXY}"} \
+ ${NETDATA_EXTRA_CLAIM_OPTS} \
+ -daemon-not-running
+fi
+
+if [ -n "${NETDATA_EXTRA_APK_PACKAGES}" ]; then
+ echo >&2 "WARNING: Netdata’s Docker images have switched from Alpine to Debian as a base platform. Supplementary package support is now handled through the NETDATA_EXTRA_DEB_PACKAGES variable instead of NETDATA_EXTRA_APK_PACKAGES."
+ echo >&2 "WARNING: The container will still run, but supplementary packages listed in NETDATA_EXTRA_APK_PACKAGES will not be installed."
+ echo >&2 "WARNING: To remove these messages, either undefine NETDATA_EXTRA_APK_PACKAGES, or define it to an empty string."
+fi
+
+if [ -n "${NETDATA_EXTRA_DEB_PACKAGES}" ]; then
+ echo "Fetching APT repository metadata."
+ if ! apt-get update; then
+ echo "Failed to fetch APT repository metadata."
+ else
+ echo "Installing supplementary packages."
+ export DEBIAN_FRONTEND="noninteractive"
+ # shellcheck disable=SC2086
+ if ! apt-get install -y --no-install-recommends ${NETDATA_EXTRA_DEB_PACKAGES}; then
+ echo "Failed to install supplementary packages."
+ fi
+ fi
+fi
+
+exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_LISTENER_PORT}" "$@"
diff --git a/packaging/ebpf-co-re.checksums b/packaging/ebpf-co-re.checksums
new file mode 100644
index 00000000..c51f3ef5
--- /dev/null
+++ b/packaging/ebpf-co-re.checksums
@@ -0,0 +1 @@
+7ef8d2a0f485b4c81942f66c50e1aedcd568b7997a933c50c0ebbd8353543c08 netdata-ebpf-co-re-glibc-v1.2.8.tar.xz
diff --git a/packaging/ebpf-co-re.version b/packaging/ebpf-co-re.version
new file mode 100644
index 00000000..d1f79a94
--- /dev/null
+++ b/packaging/ebpf-co-re.version
@@ -0,0 +1 @@
+v1.2.8
diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums
new file mode 100644
index 00000000..28f023d5
--- /dev/null
+++ b/packaging/ebpf.checksums
@@ -0,0 +1,3 @@
+9035b6b8dda5230c1ddc44991518a3ee069bd497ad5a8e5448b79dc4b8c51c43 ./netdata-kernel-collector-glibc-v1.2.8.tar.xz
+e5b1a141475f75c60c282a2e3ce8e3914893e75d474c976bad95f66d4c9846c5 ./netdata-kernel-collector-musl-v1.2.8.tar.xz
+d6081a2fedc9435d1ab430697cb101123cebaac07b62fb91d790ca526923f4e3 ./netdata-kernel-collector-static-v1.2.8.tar.xz
diff --git a/packaging/ebpf.version b/packaging/ebpf.version
new file mode 100644
index 00000000..d1f79a94
--- /dev/null
+++ b/packaging/ebpf.version
@@ -0,0 +1 @@
+v1.2.8
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
new file mode 100644
index 00000000..addfc0a5
--- /dev/null
+++ b/packaging/go.d.checksums
@@ -0,0 +1,20 @@
+9eeb1a06764fcc7f75e25d56916d1566d4a7206de778856165f407409f091470 *config.tar.gz
+9eeb1a06764fcc7f75e25d56916d1566d4a7206de778856165f407409f091470 *go.d.plugin-config-v0.58.0.tar.gz
+cb9bbbc164e16fdef46ddb3a9aafa354fe83a2765a5aa25a7ceeaa4c60d90eb7 *go.d.plugin-v0.58.0.darwin-amd64.tar.gz
+6d5123955f87ebf30e5faf17c8502616ca84f156ae5e6108cb4a83b79dd0fa6b *go.d.plugin-v0.58.0.darwin-arm64.tar.gz
+04a3ceebb345556cfc3f5dd5230c31d06cf59f8f6a6d85c4e8cfb1497ac2c793 *go.d.plugin-v0.58.0.freebsd-386.tar.gz
+9b530d2d7d387c81d0551888b0aa6b55290910f75c5f01a0d399ca29fa83757f *go.d.plugin-v0.58.0.freebsd-amd64.tar.gz
+45f4d0884b3993d3758f63a453ace96207ebdb9f2d97f89d5e42795ca743c6b6 *go.d.plugin-v0.58.0.freebsd-arm.tar.gz
+d4222a6812255946f5d367cd59e8d6284c36b44baaba2925f7268bc42368a41a *go.d.plugin-v0.58.0.freebsd-arm64.tar.gz
+4d71efc97a8f32db36f1d3f925e97531f846d9c39d66fbe63f00097f9a6cd425 *go.d.plugin-v0.58.0.linux-386.tar.gz
+287db876af5a5b093ee91ef937f4ee59ebc5fdf79e403a48042b9f3cf58c716f *go.d.plugin-v0.58.0.linux-amd64.tar.gz
+c0a4f1a20e2d93e1df7adab651b9feb7ca481b0b04e4e12323cad7b8f39e8590 *go.d.plugin-v0.58.0.linux-arm.tar.gz
+b94adb6df7fc3a04cda1078e82c2d97a514c12dcc12f5dba7cec2259a34c89bb *go.d.plugin-v0.58.0.linux-arm64.tar.gz
+5570b0ebc7c1a45c00301b0212531ee178cc06cb47912330ebc3d3d20bed6b13 *go.d.plugin-v0.58.0.linux-mips.tar.gz
+6a850631e1978fdb6ff27923c3779f85e985dd0adb3cfb3767a482777e1802c8 *go.d.plugin-v0.58.0.linux-mips64.tar.gz
+1ac22842fa52b97efac45f39f36e9fe69bd9a47497d91653563e02c2855ea5ff *go.d.plugin-v0.58.0.linux-mips64le.tar.gz
+2487214cf11430e4152fbccf17205764d91e731aa236b2edb994d8242d33db26 *go.d.plugin-v0.58.0.linux-mipsle.tar.gz
+547e4196cd1ebe07054de74f64bcea5ff704376138a495d6b66a6d3f46b22c5f *go.d.plugin-v0.58.0.linux-ppc64.tar.gz
+3917e4c798cca7d5f944eb983f8facc2227abff88fc12398a277ee38010540cd *go.d.plugin-v0.58.0.linux-ppc64le.tar.gz
+089bff22c63c1b79a0081e3c52e26eacafbea3698f967e5d18cee0c7dd0f88f9 *go.d.plugin-vendor-v0.58.0.tar.gz
+089bff22c63c1b79a0081e3c52e26eacafbea3698f967e5d18cee0c7dd0f88f9 *vendor.tar.gz
diff --git a/packaging/go.d.version b/packaging/go.d.version
new file mode 100644
index 00000000..0bf66171
--- /dev/null
+++ b/packaging/go.d.version
@@ -0,0 +1 @@
+v0.58.0
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
new file mode 100644
index 00000000..a99e869b
--- /dev/null
+++ b/packaging/installer/README.md
@@ -0,0 +1,275 @@
+import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
+import { InstallRegexLink, InstallBoxRegexLink } from '@site/src/components/InstallRegexLink/'
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Install Netdata
+
+This document will guide you through installing the open-source Netdata monitoring Agent on Linux, Docker, Kubernetes, and many others, often with one command.
+
+Netdata is very flexible and can be used to monitor all kinds of infrastructure. Read more about possible [Deployment strategies](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/deployment-strategies.md) to understand which approach best suits your needs.
+
+## Get started
+
+Netdata is a free and open-source (FOSS) monitoring agent that collects thousands of hardware and software metrics from
+any physical or virtual system (we call them _nodes_). These metrics are organized in an easy-to-use and -navigate interface.
+
+Together with [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you can monitor your entire infrastructure in
+real time and troubleshoot problems that threaten the health of your nodes.
+
+Netdata runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT devices. It
+runs on Linux distributions (Ubuntu, Debian, CentOS, and more), container/microservice platforms (Kubernetes clusters,
+Docker), and many other operating systems (FreeBSD, macOS), with no `sudo` required.
+
+To install Netdata in minutes on your platform:
+
+1. Sign up to <https://app.netdata.cloud/>
+2. You will be presented with an empty space, and a prompt to "Connect Nodes" with the install command for each platform
+3. Select the platform you want to install Netdata to, copy and paste the script into your node's terminal, and run it
+
+Once the installation completes successfully, you should see the node live in your Netdata Space and live charts
+in the Overview tab. [Read more about the cloud features](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md).
+
+Where you go from here is based on your use case, immediate needs, and experience with monitoring and troubleshooting,
+but we have some hints on what you might want to do next.
+
+### What's next?
+
+Explore our [general advanced installation options and troubleshooting](#advanced-installation-options-and-troubleshooting), specific options
+for the [single line installer](#install-on-linux-with-one-line-installer), or [other installation methods](#other-installation-methods).
+
+#### Configuration
+
+Discover the recommended way to [configure Netdata's settings or behavior](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) using our built-in
+`edit-config` script, then apply that knowledge to mission-critical tweaks, such as [changing how long Netdata stores
+metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
+
+#### Data collection
+
+If Netdata didn't autodetect all the hardware, containers, services, or applications running on your node, you should
+learn more about [how data collectors work](https://github.com/netdata/netdata/blob/master/collectors/README.md). If there's a [supported
+collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) for metrics you need, [configure the collector](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md)
+or read about its requirements and configure your endpoint to publish metrics in the format and at the endpoint the collector expects.
+
+#### Alerts & notifications
+
+Netdata comes with hundreds of preconfigured alerts, designed by our monitoring gurus in parallel with our open-source
+community, but you may want to [edit alerts](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md) or
+[enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to customize your Netdata experience.
+
+#### Make your deployment production ready
+
+Go through our [deployment strategies](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/deployment-strategies.md)
+for suggested configuration changes for production deployments.
+
+## Install on Linux with one-line installer
+
+The **recommended** way to install Netdata on a Linux node (physical, virtual, container, IoT) is our one-line
+[kickstart script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+This script automatically installs dependencies and builds Netdata from its source code.
+
+To install, copy the script, paste it into your node's terminal, and hit `Enter` to begin the installation process.
+
+ <Tabs>
+ <TabItem value="wget" label=<code>wget</code>>
+
+ <OneLineInstallWget/>
+
+ </TabItem>
+ <TabItem value="curl" label=<code>curl</code>>
+
+ <OneLineInstallCurl/>
+
+ </TabItem>
+</Tabs>
+
+> ### Note
+>
+> If you plan to also claim the node to Netdata Cloud, make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space, and `YOUR_ROOM_ID` with the ID of the room you are claiming to.
+> You can leave the room id blank to have your node claimed to the default "All nodes" room.
+
+Jump up to [what's next](#whats-next) to learn how to view your new dashboard and take your next steps in monitoring and
+troubleshooting with Netdata.
+
+## Other installation methods
+
+<InstallRegexLink>
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)"
+ os="Run with Docker"
+ svg="docker" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md)"
+ os="Deploy on Kubernetes"
+ svg="kubernetes" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md)"
+ os="Install on macOS"
+ svg="macos" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)"
+ os="Native DEB/RPM packages"
+ svg="linux" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)"
+ os="Linux from Git"
+ svg="linux" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md)"
+ os="Linux from source"
+ svg="linux" />
+ <InstallBoxRegexLink
+ to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md)"
+ os="Linux for offline nodes"
+ svg="linux" />
+</InstallRegexLink>
+
+- [Native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)
+- [Run with Docker](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)
+- [Deploy on Kubernetes](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md)
+- [Install on macOS](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md)
+- [Linux from Git](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)
+- [Linux from source](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md)
+- [Linux for offline nodes](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md)
+
+The full list of all installation methods for various systems is available in [Netdata Learn](https://learn.netdata.cloud),
+under [Installation](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/installation-overview.md).
+
+## Advanced installation options and troubleshooting
+
+### Automatic updates
+
+By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels.
+
+If you would prefer to update your Netdata agent manually, you can disable automatic updates by using the `--no-updates`
+option when you install or update Netdata using the [automatic one-line installation
+script](#install-on-linux-with-one-line-installer).
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --no-updates
+```
+
+With automatic updates disabled, you can choose exactly when and how you [update
+Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md).
+
+#### Network usage of Netdata’s automatic updater
+
+The auto-update functionality set up by the installation scripts requires working internet access to function
+correctly. In particular, it currently requires access to GitHub (to check if a newer version of the updater script
+is available or not, as well as potentially fetching build-time dependencies that are bundled as part of the install),
+and Google Cloud Storage (to check for newer versions of Netdata and download the sources if there is a newer version).
+
+Note that the auto-update functionality will check for updates to itself independently of updates to Netdata,
+and will try to use the latest version of the updater script whenever possible. This is intended to reduce the
+amount of effort required by users to get updates working again in the event of a bug in the updater code.
+
+### Nightly vs. stable releases
+
+The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's
+installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration.
+
+**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws,
+or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when
+we're ready, we simply change the release tags on GitHub. That means nightly releases are stable and proven to function
+correctly in the vast majority of Netdata use cases. That's why nightly is the _best choice for most Netdata users_.
+
+**Stable**: We create stable releases whenever we believe the code has reached a major milestone. Most often, stable
+releases correlate with the introduction of new, significant features. Stable releases might be a better choice for
+those who run Netdata in _mission-critical production systems_, as updates will come more infrequently, and only after
+the community helps fix any bugs that might have been introduced in previous releases.
+
+**Pros of using nightly releases:**
+
+- Get the latest features and bug fixes as soon as they're available
+- Receive security-related fixes immediately
+- Use stable, fully-tested code that's always improving
+- Leverage the same Netdata experience our community is using
+
+**Pros of using stable releases:**
+
+- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata
+ installation
+- Retain more control over the Netdata version you use
+
+### Anonymous statistics
+
+Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. Read about the information collected, and learn how to opt out, on our [anonymous statistics](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) page.
+
+The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for
+_actively_ contributing to Netdata's future.
+
+### Troubleshooting and known issues
+
+We are tracking a few issues related to installation and packaging.
+
+#### Installs on hosts without IPv4 connectivity
+
+Our regular installation process requires access to a number of GitHub services that do not have IPv6 connectivity. As
+such, using the kickstart install script on such hosts generally does not work, and will typically fail with an
+error from cURL or wget about connection timeouts. You can check if your system is affected by this by attempting
+to connect to (or ping) `https://api.github.com/`. Failing to connect indicates that you are affected by this issue.
+
+There are three potential workarounds for this:
+
+1. You can configure your system with a proper IPv6 transition mechanism, such as NAT64. GitHub’s anachronisms
+ affect many projects other than just Netdata, and there are unfortunately a number of other services out there
+ that do not provide IPv6 connectivity, so taking this route is likely to save you time in the future as well.
+2. If you are using a system that we publish native packages for (see our [platform support
+ policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md) for more details),
+ you can manually set up our native package repositories as outlined in our [native package install
+ documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md). Our official
+ package repositories do provide service over IPv6, so they work without issue on hosts without IPv4 connectivity.
+3. If neither of the above options work for you, you can still install using our [offline installation
+ instructions](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md), though
+ do note that the offline install source must be prepared from a system with IPv4 connectivity.
+
+#### Older distributions (Ubuntu 14.04, Debian 8, CentOS 6) and OpenSSL
+
+If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS
+6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old
+versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which
+helps securely encrypt SSL connections.
+
+If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit
+with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to
+man-in-the-middle attacks.
+
+#### CentOS 6 and CentOS 8
+
+To install the Agent on certain CentOS and RHEL systems, you must enable non-default repositories, such as EPEL or
+PowerTools, to gather hard dependencies. See the [CentOS 6](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#centos--rhel-6x) and
+[CentOS 8](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#centos--rhel-8x) sections for more information.
+
+#### Access to file is not permitted
+
+If you see an error similar to `Access to file is not permitted: /usr/share/netdata/web/index.html` when you try to
+visit the Agent dashboard at `http://NODE:19999`, you need to update Netdata's permissions to match those of your
+system.
+
+Run `ls -la /usr/share/netdata/web/index.html` to find the file's permissions. You may need to change this path based on
+the error you're seeing in your browser. In the below example, the file is owned by the user `root` and the group
+`root`.
+
+```bash
+ls -la /usr/share/netdata/web/index.html
+-rw-r--r--. 1 root root 89377 May 5 06:30 /usr/share/netdata/web/index.html
+```
+
+These files need to be owned by the same user and group that Netdata was installed with. For example, if you installed
+Netdata with user `netdata` and group `netdata`, run the following command to fix the error:
+
+```bash
+# chown -R netdata:netdata /usr/share/netdata/web
+```
+
+#### Multiple versions of OpenSSL
+
+We've received reports from the community about issues with running the `kickstart.sh` script on systems that have both
+a distribution-installed version of OpenSSL and a manually-installed local version. The Agent's installer cannot handle
+both.
+
+#### Clang compiler on Linux
+
+Our current build process has some issues when using certain configurations of the `clang` C compiler on Linux. See [the
+section on `nonrepresentable section on output`
+errors](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#nonrepresentable-section-on-output-errors) for a workaround.
diff --git a/packaging/installer/REINSTALL.md b/packaging/installer/REINSTALL.md
new file mode 100644
index 00000000..82cea498
--- /dev/null
+++ b/packaging/installer/REINSTALL.md
@@ -0,0 +1,66 @@
+# Reinstall Netdata
+
+In certain situations, such as needing to enable a feature or troubleshoot an issue, you may need to reinstall the
+Netdata Agent on your node.
+
+## One-line installer script (`kickstart.sh`)
+
+### Reinstalling with the same install type
+
+Run the one-line installer script with the `--reinstall` parameter to reinstall the Netdata Agent. This will preserve
+any [user configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) in `netdata.conf` or other files, and will keep the same install
+type that was used for the original install.
+
+If you used any [optional
+parameters](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
+installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you
+used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of
+optional parameters.
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall
+```
+
+### Performing a clean reinstall
+
+Run the one-line installer script with the `--reinstall-clean` parameter to perform a clean reinstall of the
+Netdata Agent. This will wipe all existing configuration and historical data, but can be useful sometimes for
+getting a badly broken installation working again. Unlike the regular `--reinstall` parameter, this may use a
+different install type than the original install used.
+
+If you used any [optional
+parameters](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
+installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you
+used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of
+optional parameters.
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall-clean
+```
+
+### Changing the install type of an existing installation
+
+The clean reinstall procedure outlined above can also be used to manually change the install type for an existing
+installation. Without any extra parameters, it will automatically pick the preferred installation type for your
+system, even if that has changed since the original install. If you want to force use of a specific install type,
+you can use the `--native-only`, `--static-only`, or `--build-only` parameter to control which install type gets
+used, just like with a new install.
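+
+For example, a possible invocation that forces a clean reinstall using the static build would be:
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall-clean --static-only
+```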
+
+When using the `--reinstall-clean` option to change the install type, you will need to manually preserve any
+configuration or historical data you want to keep. The following directories may need to be preserved:
+
+- `/etc/netdata` (`/opt/netdata/etc/netdata` for static installs): For agent configuration.
+- `/var/lib/netdata` (`/opt/netdata/var/lib/netdata` for static installs): For claiming configuration.
+- `/var/cache/netdata` (`/opt/netdata/var/cache/netdata` for static installs): For historical data.
+
+When copying these directories back after the reinstall, you may need to update file ownership by running `chown
+-R netdata:netdata` on them.
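+
+As an illustration only (the paths assume a static install and a hypothetical backup location under
+`/root/netdata-backup`):
+
+```bash
+# Copy preserved configuration and claiming data back into place, then fix ownership.
+cp -a /root/netdata-backup/etc/netdata/. /opt/netdata/etc/netdata/
+cp -a /root/netdata-backup/var/lib/netdata/. /opt/netdata/var/lib/netdata/
+chown -R netdata:netdata /opt/netdata/etc/netdata /opt/netdata/var/lib/netdata
+```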
+
+## Troubleshooting
+
+If you still experience problems with your Netdata Agent installation after following one of these processes, the next
+best route is to [uninstall](https://github.com/netdata/netdata/blob/master/packaging/installer/UNINSTALL.md) and then try a fresh installation using the [one-line
+installer](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+You can also post to our [community forums](https://community.netdata.cloud/c/support/13) or create a new [bug
+report](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml).
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
new file mode 100644
index 00000000..a66bd7a2
--- /dev/null
+++ b/packaging/installer/UNINSTALL.md
@@ -0,0 +1,85 @@
+# Uninstall Netdata
+
+> ### Note
+>
+> If you're having trouble updating Netdata, moving from one installation method to another, or generally having
+> issues with your Netdata Agent installation, consider our [reinstalling Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md) instead of removing the Netdata Agent entirely.
+
+The recommended method to uninstall Netdata on a system is to use our kickstart installer script with the `--uninstall` option like so:
+
+```sh
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
+```
+
+Or (if you have curl but not wget):
+
+```sh
+curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
+```
+
+This will work in most cases without you needing to do anything other than accept the removal of configuration
+and data files.
+
+If you used a non-standard installation prefix, you may need to specify that prefix using the `--old-install-prefix`
+option when uninstalling this way.
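+
+For example, a sketch for an install that used `/opt/netdata` as its prefix (adjust the path to match your
+installation):
+
+```sh
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall --old-install-prefix /opt/netdata
+```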
+
+## Unofficial installs
+
+If you used a third-party package to install Netdata, then the above method will usually not work, and you will
+need to use whatever mechanism you used to originally install Netdata to uninstall it.
+
+## Uninstalling manually
+
+Most official installs of Netdata include an uninstaller script that can be manually invoked instead of using the
+kickstart script (internally, the kickstart script also uses this uninstaller script; it just handles the process
+outlined below for you).
+
+This uninstaller script is self-contained other than requiring a `.environment` file that was generated during
+installation. In most cases, this will be found in `/etc/netdata/.environment`, though if you used a non-standard
+installation prefix it will usually be located in a similar place under that prefix.
+
+A workflow for uninstallation looks like this:
+
+1. Find your `.environment` file, which is usually `/etc/netdata/.environment` in a default installation.
+2. If you cannot find that file and would like to uninstall Netdata, then create a new file with the following content:
+
+```sh
+NETDATA_PREFIX="<installation prefix>" # Set this to the value you passed to the installer's --install-prefix flag; leave it empty if you did not use one
+NETDATA_ADDED_TO_GROUPS="<additional groups>" # Additional groups for the user running the Netdata process
+```
+
+3. Run `netdata-uninstaller.sh` as follows
+
+ 3.1 **Interactive mode (Default)**
+
+ The default mode in the uninstaller script is **interactive**. This means that the script provides you
+ the option to reply with "yes" (`y`/`Y`) or "no" (`n`/`N`) to control the removal of each Netdata asset in
+ the filesystem.
+
+ ```sh
+ ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env <environment_file>
+ ```
+
+ 3.2 **Non-interactive mode**
+
+ If you are sure and you know what you are doing, you can speed up the removal of the Netdata assets from the
+ filesystem without any questions by using the force option (`-f`/`--force`). This option will remove all the
+ Netdata assets in a **non-interactive** mode.
+
+ ```sh
+ ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --force --env <environment_file>
+ ```
+
+Note: Existing installations may still need to download the uninstaller script if it is not present. To uninstall in
+that case, run the following commands:
+
+```sh
+wget https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh
+chmod +x ./netdata-uninstaller.sh
+./netdata-uninstaller.sh --yes --env <environment_file>
+```
+
+The default `environment_file` is `/etc/netdata/.environment`.
+
+> Note: This uninstallation method assumes previous installation with `netdata-installer.sh` or the kickstart script.
+> Using it when Netdata was installed in some other way will usually not work correctly, and may make it harder to uninstall Netdata.
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md
new file mode 100644
index 00000000..7275ee52
--- /dev/null
+++ b/packaging/installer/UPDATE.md
@@ -0,0 +1,209 @@
+# Update Netdata
+
+By default, the Netdata Agent automatically updates with the latest nightly or stable version depending on which
+you installed. If you opted out of automatic updates, you need to update your Netdata Agent to the latest nightly
+or stable version. You can also [enable or disable automatic updates on an existing install](#control-automatic-updates).
+
+> 💡 Looking to reinstall the Netdata Agent to enable a feature, update an Agent that cannot update automatically, or
+> troubleshoot an error during the installation process? See our [reinstallation doc](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md)
+> for reinstallation steps.
+
+Before you update the Netdata Agent, check to see if your Netdata Agent is already up-to-date by clicking on the update
+icon in the local Agent dashboard's top navigation. This modal informs you whether your Agent needs an update or not.
+
+The exact update method to use depends on the install type:
+
+- Installs with an install type of 'custom' usually indicate installing a third-party package through the system
+ package manager. To update these installs, you should update the package just like you would any other package
+ on your system.
+- Installs with an install type starting with `binpkg` or ending with `build` or `static` can be updated using
+ our [regular update method](#updates-for-most-systems).
+- Installs with an install type of 'oci' were created from our official Docker images, and should be updated
+ using our [Docker](#docker) update procedure.
+- macOS users should check [our update instructions for macOS](#macos).
+- Manually built installs should check [our update instructions for manual builds](#manual-installation-from-git).
+
+## Determine which installation method you used
+
+Starting with netdata v1.33.0, you can use Netdata itself to determine the installation type by running:
+
+```bash
+netdata -W buildinfo | grep -E 'Installation Type|Install type:'
+```
+
+The following table contains all possible installation types:
+
+| Installation-type | Description |
+|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| binpkg-rpm | RPM-based native packages shipped from Netdata's repos. |
+| binpkg-deb | DEB-based native packages shipped from Netdata's repos. |
+| kickstart-build | Build from source with the kickstart script's `--build-only` option. |
+| kickstart-static   | Static build installed by the kickstart script (option: `--static-only`).                                                                                    |
+| manual-static-ARCH | Static Agent binaries installed manually from archives downloaded from GitHub. Offline installations fall into this category.                                |
+| legacy-build       | Used for pre-existing kickstart.sh or netdata-installer.sh installations. This exists because we cannot determine how the install originally happened.       |
+| legacy-static | Same as legacy-build, but for static installs. |
+| oci | Installed using official Docker images from Netdata, though not necessarily running on Docker |
+| custom | Anything not covered by the other identifiers, including manual builds, manually running netdata-installer.sh, and third-party packages (community). |
+| Unknown | Same as custom. |
+
+
+If you are using an older version of Netdata, or the above command produces no output, you can run our one-line
+installation script in dry-run mode to attempt to determine what method to use to update by running the following
+command:
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --dry-run
+```
+
+Note that if you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option
+specifying that prefix to make sure it finds the existing install.
+
+If you see a line starting with `--- Would attempt to update existing installation by running the updater script
+located at:`, then our [regular update method](#updates-for-most-systems) will work for you.
+
+Otherwise, it should either indicate that the installation type is not supported (which probably means you either
+have a `custom` install or built Netdata manually) or indicate that it would create a new install (which means that
+you either used a non-standard install path, or that you don’t actually have Netdata installed).
+
+## Updates for most systems
+
+In most cases, you can update netdata using our one-line installation script. This script will automatically
+run the update script that was installed as part of the initial install (even if you disabled automatic updates)
+and preserve the existing install options you specified.
+
+If you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option specifying
+that prefix to this command to make sure it finds Netdata.
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh
+```
+
+### Issues with older binpkg installs
+
+The above command is known not to work with binpkg type installs for stable releases with a version number of
+v1.33.1 or earlier, and nightly builds with a version number of v1.33.1-93 or earlier. If you have such a system,
+the above command will report that it found an existing install, and then issue a warning about not being able to
+find the updater script.
+
+On such installs, you can update Netdata using your distribution package manager.
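+
+For example, depending on which package manager your system uses, something along these lines should work:
+
+```bash
+# DEB-based systems (Debian, Ubuntu, ...)
+sudo apt update && sudo apt install --only-upgrade netdata
+# RPM-based systems (Fedora, RHEL, CentOS, ...)
+sudo dnf upgrade netdata
+```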
+
+### Updates on hosts without IPv4 connectivity
+
+The update process outlined above suffers from the same issues that installing on hosts without IPv4
+connectivity does, and requires similar workarounds. For more details check [the explanation in our install
+documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#installs-on-hosts-without-ipv4-connectivity).
+
+### If the kickstart script does not work
+
+If the above command fails, you can [reinstall
+Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md#one-line-installer-script-kickstartsh) to get the latest version. This
+also preserves your [configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) in `netdata.conf` or other files just like updating
+normally would, though you will need to specify any installation options you used originally again.
+
+## Docker
+
+Docker-based installations do not update automatically. To update a Netdata Agent running in a Docker container, you
+must pull the [latest image from Docker Hub](https://hub.docker.com/r/netdata/netdata), stop and remove the container,
+and re-create it using the latest image.
+
+First, pull the latest version of the image.
+
+```bash
+docker pull netdata/netdata:latest
+```
+
+Next, stop and remove any containers using the `netdata/netdata` image. Replace `netdata` if you changed it from the
+default.
+
+```bash
+docker stop netdata
+docker rm netdata
+```
+
+You can now re-create your Netdata container using the `docker` command or a `docker-compose.yml` file. See our [Docker
+installation instructions](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.
+
+## macOS
+
+If you installed Netdata on your macOS system using Homebrew, you can explicitly request an update:
+
+```bash
+brew upgrade netdata
+```
+
+Homebrew downloads the latest Netdata via the
+[formula](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb), ensures all dependencies are met,
+and updates Netdata via reinstallation.
+
+If you instead installed Netdata using our one-line installation script, you can use our [regular update
+instructions](#updates-for-most-systems) to update Netdata.
+
+## Manual installation from Git
+
+If you installed [Netdata manually from Git](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md), you can run that installer again
+to update your agent. First, run our automatic requirements installer, which works on many Linux distributions, to
+ensure your system has the dependencies necessary for new features.
+
+```bash
+bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh)
+```
+
+Navigate to the directory where you first cloned the Netdata repository, pull the latest source code, and run
+`netdata-installer.sh` again. This process compiles Netdata with the latest source code and updates it via reinstallation.
+
+```bash
+cd /path/to/netdata/git
+git pull origin master
+sudo ./netdata-installer.sh
+```
+
+> ⚠️ If you installed Netdata with any optional parameters, such as `--no-updates` to disable automatic updates, and
+> want to retain those settings, you need to set them again during this process.
+
+## Control automatic updates
+
+Starting with Netdata v1.34.0, you can easily enable or disable automatic updates on an existing installation
+using the updater script.
+
+For most installs on Linux, you can enable auto-updates with:
+
+```bash
+/usr/libexec/netdata/netdata-updater.sh --enable-auto-updates
+```
+
+and disable them with:
+
+```bash
+/usr/libexec/netdata/netdata-updater.sh --disable-auto-updates
+```
+
+For static installs, instead use:
+
+```bash
+/opt/netdata/usr/libexec/netdata/netdata-updater.sh --enable-auto-updates
+```
+
+and:
+
+```bash
+/opt/netdata/usr/libexec/netdata/netdata-updater.sh --disable-auto-updates
+```
+
+## Control runtime behavior of the updater script
+
+Starting with v1.40.0, the `netdata-updater.sh` script supports a config file called `netdata-updater.conf`,
+located in the same directory as the main `netdata.conf` file. This file uses POSIX shell script syntax to define
+variables that are used by the updater.
+
+This configuration file can be edited [using our `edit-config`
+script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md).
+
+The following configuration options are currently supported (a short sample configuration follows the list):
+
+- `NETDATA_UPDATER_JITTER`: Sets an upper limit in seconds on the random delay in the updater script when running
+ as a scheduled task. This random delay helps avoid issues resulting from too many nodes trying to reconnect to
+ the Cloud at the same time. The default value is 3600, which corresponds to one hour. Most users should not ever
+ need to change this.
+- `NETDATA_NO_SYSTEMD_JOURNAL`: If set to a value other than 0, skip attempting to install the
+ `netdata-plugin-systemd-journal` package on supported systems on update. This optional package will be installed
+ by default on supported systems by the updater if this option is not set. Only affects systems using native packages.
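+
+A minimal sample `netdata-updater.conf` (the values shown are illustrative, not recommendations):
+
+```bash
+# Cap the random update delay at 30 minutes instead of the default hour.
+NETDATA_UPDATER_JITTER=1800
+# Skip installing the optional systemd journal plugin on update (native packages only).
+NETDATA_NO_SYSTEMD_JOURNAL=1
+```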
diff --git a/packaging/installer/dependencies/alpine.sh b/packaging/installer/dependencies/alpine.sh
new file mode 100755
index 00000000..ee0504b3
--- /dev/null
+++ b/packaging/installer/dependencies/alpine.sh
@@ -0,0 +1,114 @@
+#!/bin/sh
+# Package tree used for installing netdata on distribution:
+# << Alpine: [3.12] [3.13] [3.14] [3.15] [edge] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+package_tree="
+ alpine-sdk
+ git
+ gcc
+ g++
+ automake
+ autoconf
+ cmake
+ make
+ libatomic
+ libtool
+ pkgconfig
+ tar
+ curl
+ gzip
+ libuv-dev
+ lz4-dev
+ openssl-dev
+ elfutils-dev
+ python3
+ zlib-dev
+ util-linux-dev
+ libmnl-dev
+ json-c-dev
+ musl-fts-dev
+ bison
+ flex
+ yaml-dev
+ "
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ printf "Press ENTER to run it > "
+ read -r || exit 1
+ fi
+}
+
+# shellcheck disable=2068
+check_flags ${@}
+
+packages_to_install=
+
+handle_old_alpine() {
+ version="$(grep VERSION_ID /etc/os-release | cut -f 2 -d '=')"
+ major="$(echo "${version}" | cut -f 1 -d '.')"
+ minor="$(echo "${version}" | cut -f 2 -d '.')"
+
+ if [ "${major}" -le 3 ] && [ "${minor}" -le 16 ]; then
+ package_tree="$(echo "${package_tree}" | sed 's/musl-fts-dev/fts-dev/')"
+ fi
+}
+
+# Adjust the package list for Alpine 3.16 and earlier, where fts-dev is used instead of musl-fts-dev.
+handle_old_alpine
+
+for package in $package_tree; do
+ if apk -e info "$package" > /dev/null 2>&1 ; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '${package}' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [ -z "${packages_to_install}" ]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "$packages_to_install"
+ opts="--force-broken-world"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ else
+ opts="${opts} -i"
+ fi
+ # shellcheck disable=SC2086
+ apk add ${opts} $packages_to_install
+fi
diff --git a/packaging/installer/dependencies/arch.sh b/packaging/installer/dependencies/arch.sh
new file mode 100755
index 00000000..30be834b
--- /dev/null
+++ b/packaging/installer/dependencies/arch.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << ArchLinux: [base] [base-devel] >> | << Manjaro >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+declare -a package_tree=(
+ gcc
+ make
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ libtool
+ cmake
+ zlib
+ util-linux
+ libmnl
+ json-c
+ libyaml
+ libuv
+ lz4
+ openssl
+ libelf
+ git
+ pkgconfig
+ tar
+ curl
+ gzip
+ python3
+ binutils
+ bison
+ flex
+)
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if pacman -Qn "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install: " "${packages_to_install[@]}"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="--noconfirm"
+ fi
+ # shellcheck disable=SC2068
+ pacman -Sy ${opts} ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/centos.sh b/packaging/installer/dependencies/centos.sh
new file mode 100755
index 00000000..532a0a71
--- /dev/null
+++ b/packaging/installer/dependencies/centos.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << CentOS: [7] [8] [9] >>
+
+set -e
+
+declare -a package_tree=(
+ autoconf
+ autoconf-archive
+ automake
+ bison
+ cmake
+ cmake3
+ curl
+ elfutils-libelf-devel
+ flex
+ findutils
+ gcc
+ gcc-c++
+ git
+ gzip
+ json-c-devel
+ libatomic
+ libmnl-devel
+ libtool
+ libuuid-devel
+ libuv-devel
+ libyaml-devel
+ lz4-devel
+ make
+ openssl-devel
+ pkgconfig
+ python3
+ systemd-devel
+ tar
+ zlib-devel
+)
+
+os_version() {
+ if [[ -f /etc/os-release ]]; then
+ # shellcheck disable=SC2002
+ cat /etc/os-release | grep VERSION_ID | cut -d'=' -f2 | cut -d'"' -f2
+ else
+ echo "Error: Cannot determine OS version!"
+ exit 1
+ fi
+}
+
+prompt() {
+ if [[ "${NON_INTERACTIVE}" == "1" ]]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+ echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [[ "${DONT_WAIT}" == "0" ]] && [[ "${NON_INTERACTIVE}" == "0" ]]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+validate_tree_centos() {
+ local opts=
+ package_manager=
+ if [[ "${NON_INTERACTIVE}" == "1" ]]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > CentOS Version: $(os_version) ..."
+
+ if [[ $(os_version) =~ ^9(\..*)?$ ]]; then
+ package_manager=dnf
+ echo >&2 " > Checking for config-manager ..."
+ if ! dnf config-manager --help &> /dev/null; then
+ if prompt "config-manager not found, shall I install it?"; then
+ dnf ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for CRB ..."
+ if ! dnf repolist | grep CRB; then
+ if prompt "CRB not found, shall I install it?"; then
+ dnf ${opts} config-manager --set-enabled crb
+ fi
+ fi
+ elif [[ $(os_version) =~ ^8(\..*)?$ ]]; then
+ package_manager=dnf
+ echo >&2 " > Checking for config-manager ..."
+ if ! dnf config-manager --help &> /dev/null; then
+ if prompt "config-manager not found, shall I install it?"; then
+ dnf ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for PowerTools ..."
+ if ! dnf repolist | grep PowerTools; then
+ if prompt "PowerTools not found, shall I install it?"; then
+ dnf ${opts} config-manager --set-enabled powertools || enable_powertools_repo
+ fi
+ fi
+
+ echo >&2 " > Updating libarchive ..."
+ dnf ${opts} install libarchive
+
+ elif [[ $(os_version) =~ ^7(\..*)?$ ]]; then
+ package_manager=yum
+ echo >&2 " > Checking for EPEL ..."
+ if ! rpm -qa | grep epel-release > /dev/null; then
+ if prompt "EPEL not found, shall I install it?"; then
+ yum ${opts} install epel-release
+ fi
+ fi
+ yum makecache
+ fi
+}
+
+enable_powertools_repo() {
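+  # Fallback used when 'dnf config-manager --set-enabled powertools' fails: write the repository definition by hand.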
+ if ! dnf repolist | grep -q powertools; then
+ cat > /etc/yum.repos.d/powertools.repo <<-EOF
+ [powertools]
+ name=CentOS Linux \$releasever - PowerTools
+ mirrorlist=http://mirrorlist.centos.org/?release=\$releasever&arch=\$basearch&repo=PowerTools&infra=\$infra
+ #baseurl=http://mirror.centos.org/\$contentdir/\$releasever/PowerTools/\$basearch/os/
+ gpgcheck=1
+ enabled=1
+ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+EOF
+ else
+ echo "Something went wrong!"
+ exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+validate_tree_centos
+
+packages_to_install=
+
+for package in "${package_tree[@]}"; do
+ if rpm -q "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "${packages_to_install[@]}"
+ opts=
+ if [[ "${NON_INTERACTIVE}" == "1" ]]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+ # shellcheck disable=SC2068
+ ${package_manager} install ${opts} ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/clearlinux.sh b/packaging/installer/dependencies/clearlinux.sh
new file mode 100755
index 00000000..67d3ea24
--- /dev/null
+++ b/packaging/installer/dependencies/clearlinux.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << ClearLinux: [base] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+declare -a package_tree=(
+ c-basic
+ curl
+ devpkg-elfutils
+ devpkg-json-c
+ devpkg-libmnl
+ devpkg-libuv
+ devpkg-lz4
+ devpkg-openssl
+ devpkg-util-linux
+ devpkg-zlib
+ findutils
+ git
+ gzip
+ make
+ python3-basic
+ service-os-dev
+ sysadmin-basic
+ yaml-dev
+)
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if [[ "$(swupd bundle-info "$package" | grep Status | cut -d':' -f2)" == " Not installed" ]]; then
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ else
+ echo "Package '$package' is installed"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install: " "${packages_to_install[@]}"
+ # shellcheck disable=SC2068
+ swupd bundle-add ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/debian.sh b/packaging/installer/dependencies/debian.sh
new file mode 100755
index 00000000..692a7119
--- /dev/null
+++ b/packaging/installer/dependencies/debian.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Debian: [9] [10] [11] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+package_tree="
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ flex
+ g++
+ gcc
+ git
+ gzip
+ libatomic1
+ libelf-dev
+ libjson-c-dev
+ liblz4-dev
+ libmnl-dev
+ libssl-dev
+ libsystemd-dev
+ libtool
+ libuv1-dev
+ libyaml-dev
+ make
+ pkg-config
+ python
+ python3
+ tar
+ uuid-dev
+ zlib1g-dev
+ "
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=2068
+check_flags ${@}
+
+packages_to_install=
+
+for package in $package_tree; do
+ if dpkg -s "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '${package}' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z "$packages_to_install" ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "$packages_to_install"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # shellcheck disable=SC2034
+ DEBIAN_FRONTEND="noninteractive"
+ opts="${opts} -yq"
+ fi
+ echo "Running apt-get update and updating your APT caches ..."
+ apt-get update
+ # shellcheck disable=2086
+ apt-get install ${opts} $packages_to_install
+fi
diff --git a/packaging/installer/dependencies/fedora.sh b/packaging/installer/dependencies/fedora.sh
new file mode 100755
index 00000000..fc30b611
--- /dev/null
+++ b/packaging/installer/dependencies/fedora.sh
@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Fedora: [24->38] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+os_version() {
+ if [[ -f /etc/os-release ]]; then
+ # shellcheck disable=SC2002
+ cat /etc/os-release | grep VERSION_ID | cut -d'=' -f2
+ else
+ echo "Error: Cannot determine OS version!"
+ exit 1
+ fi
+}
+
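+# ulogd is only requested on Fedora 24 and earlier; newer releases do not need it.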
+if [[ $(os_version) -gt 24 ]]; then
+ ulogd_pkg=
+else
+ ulogd_pkg=ulogd
+fi
+
+declare -a package_tree=(
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ elfutils-libelf-devel
+ findutils
+ flex
+ gcc
+ gcc-c++
+ git
+ gzip
+ json-c-devel
+ libatomic
+ libmnl-devel
+ libtool
+ libuuid-devel
+ libuv-devel
+ libyaml-devel
+ lz4-devel
+ make
+ openssl-devel
+ pkgconfig
+ python3
+ systemd-devel
+ tar
+ zlib-devel
+ "${ulogd_pkg}"
+)
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if rpm -q "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "${packages_to_install[@]}"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+ # shellcheck disable=SC2068
+ dnf install ${opts} ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/freebsd.sh b/packaging/installer/dependencies/freebsd.sh
new file mode 100755
index 00000000..eadbcfa9
--- /dev/null
+++ b/packaging/installer/dependencies/freebsd.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << FreeBSD >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+package_tree="
+ git
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ libtool
+ pkgconf
+ cmake
+ curl
+ gzip
+ lzlib
+ e2fsprogs-libuuid
+ json-c
+ libyaml
+ libuv
+ liblz4
+ openssl
+ python3
+ bison
+ flex
+ "
+
+prompt() {
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+ echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+validate_tree_freebsd() {
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > Checking for gmake ..."
+ if ! pkg query %n-%v | grep -q gmake; then
+ if prompt "gmake is required to build on FreeBSD and is not installed. Shall I install it?"; then
+ pkg install ${opts} gmake
+ fi
+ fi
+}
+
+enable_repo () {
+ if ! dnf repolist | grep -q codeready; then
+cat >> /etc/yum.repos.d/oracle-linux-ol8.repo <<-EOF
+
+[ol8_codeready_builder]
+name=Oracle Linux \$releasever CodeReady Builder (\$basearch)
+baseurl=http://yum.oracle.com/repo/OracleLinux/OL8/codeready/builder/\$basearch
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
+gpgcheck=1
+enabled=1
+EOF
+ else
+ echo "Something went wrong!"
+ exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+validate_tree_freebsd
+
+packages_to_install=
+
+for package in $package_tree; do
+ if pkg info -Ix "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '${package}' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z "$packages_to_install" ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "$packages_to_install"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+ # shellcheck disable=SC2086
+ pkg install ${opts} $packages_to_install
+fi
diff --git a/packaging/installer/dependencies/gentoo.sh b/packaging/installer/dependencies/gentoo.sh
new file mode 100755
index 00000000..9cf7f281
--- /dev/null
+++ b/packaging/installer/dependencies/gentoo.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Gentoo >> | << Pentoo >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+package_tree="
+ dev-vcs/git
+ sys-apps/findutils
+ sys-devel/gcc
+ sys-devel/make
+ sys-devel/autoconf
+ sys-devel/autoconf-archive
+ sys-devel/autogen
+ sys-devel/automake
+ virtual/pkgconfig
+ dev-util/cmake
+ app-arch/tar
+ net-misc/curl
+ app-arch/gzip
+ sys-apps/util-linux
+ net-libs/libmnl
+ dev-libs/json-c
+ dev-libs/libyaml
+ dev-libs/libuv
+ app-arch/lz4
+ dev-libs/openssl
+ virtual/libelf
+ dev-lang/python
+ dev-libs/libuv
+ sys-devel/bison
+ sys-devel/flex
+ "
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in $package_tree; do
+ if qlist -IRv "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '${package}' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z "$packages_to_install" ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "$packages_to_install"
+ opts="--ask"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts=""
+ fi
+ # shellcheck disable=SC2086
+ emerge ${opts} $packages_to_install
+fi
diff --git a/packaging/installer/dependencies/macos.sh b/packaging/installer/dependencies/macos.sh
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/packaging/installer/dependencies/macos.sh
diff --git a/packaging/installer/dependencies/ol.sh b/packaging/installer/dependencies/ol.sh
new file mode 100755
index 00000000..2dc10cee
--- /dev/null
+++ b/packaging/installer/dependencies/ol.sh
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Oracle Linux: [8, 9] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+declare -a package_tree=(
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ elfutils-libelf-devel
+ flex
+ gcc
+ gcc-c++
+ git
+ gzip
+ json-c-devel
+ libatomic
+ libmnl-devel
+ libtool
+ libuuid-devel
+ libuv-devel
+ libyaml-devel
+ lz4-devel
+ make
+ openssl-devel
+ pkgconfig
+ python3
+ systemd-devel
+ tar
+ zlib-devel
+)
+
+prompt() {
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+ echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+validate_tree_ol() {
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ # shellcheck disable=SC1091
+ source /etc/os-release
+
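+  # Derive the major Oracle Linux release (8 or 9) from VERSION to pick the matching CodeReady Builder repository.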
+ # shellcheck disable=SC2153
+ version="$(echo "${VERSION}" | cut -f 1 -d '.')"
+
+ echo >&2 " > Checking for config-manager ..."
+ if ! dnf config-manager &> /dev/null; then
+ if prompt "config-manager not found, shall I install it?"; then
+ dnf ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo " > Checking for CodeReady Builder ..."
+ if [[ "${version}" =~ ^8(\..*)?$ ]]; then
+ if ! dnf repolist enabled | grep ol8_codeready_builder; then
+ if prompt "CodeReadyBuilder not found, shall I install it?"; then
+ dnf ${opts} config-manager --set-enabled ol8_codeready_builder || enable_repo
+ fi
+ fi
+ elif [[ "${version}" =~ ^9(\..*)?$ ]]; then
+ if ! dnf repolist enabled | grep ol9_codeready_builder; then
+ if prompt "CodeReadyBuilder not found, shall I install it?"; then
+ dnf ${opts} config-manager --set-enabled ol9_codeready_builder || enable_repo
+ fi
+ fi
+ fi
+
+ dnf makecache --refresh
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+validate_tree_ol
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if rpm -q "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+ echo "packages_to_install:" "${packages_to_install[@]}"
+ # shellcheck disable=SC2068
+ dnf install ${opts} ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/opensuse.sh b/packaging/installer/dependencies/opensuse.sh
new file mode 100755
index 00000000..ecf1268f
--- /dev/null
+++ b/packaging/installer/dependencies/opensuse.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << openSUSE >>
+# supported versions: leap/15.3 and tumbleweed
+# it may also work with SLES, although we have not tested it
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+declare -a package_tree=(
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ flex
+ gcc
+ gcc-c++
+ git
+ gzip
+ libatomic1
+ libelf-devel
+ libjson-c-devel
+ liblz4-devel
+ libmnl-devel
+ libopenssl-devel
+ libtool
+ libuuid-devel
+ libuv-devel
+ libyaml-devel
+ make
+ pkg-config
+ python3
+ systemd-devel
+ tar
+ zlib-devel
+)
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if zypper search -i "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "${packages_to_install[@]}"
+ opts="--ignore-unknown"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="--non-interactive"
+ fi
+ # shellcheck disable=SC2068
+ zypper ${opts} install ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/rhel.sh b/packaging/installer/dependencies/rhel.sh
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/packaging/installer/dependencies/rhel.sh
diff --git a/packaging/installer/dependencies/rockylinux.sh b/packaging/installer/dependencies/rockylinux.sh
new file mode 100755
index 00000000..cc8d4520
--- /dev/null
+++ b/packaging/installer/dependencies/rockylinux.sh
@@ -0,0 +1,167 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Rocky Linux:[8.5] >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+declare -a package_tree=(
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ elfutils-libelf-devel
+ findutils
+ flex
+ gcc
+ gcc-c++
+ git
+ gzip
+ json-c-devel
+ libatomic
+ libmnl-devel
+ libtool
+ libuuid-devel
+ libuv-devel
+ libyaml-devel
+ lz4-devel
+ make
+ openssl-devel
+ pkgconfig
+ python3
+ systemd-devel
+ tar
+ zlib-devel
+)
+
+prompt() {
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+ echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+validate_tree_rockylinux() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > Checking for config-manager ..."
+ if ! dnf config-manager; then
+ if prompt "config-manager not found, shall I install it?"; then
+ dnf ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for PowerTools ..."
+ if ! dnf repolist | grep PowerTools; then
+ if prompt "PowerTools not found, shall I install it?"; then
+ dnf ${opts} config-manager --set-enabled powertools || enable_powertools_repo
+ fi
+ fi
+
+ echo >&2 " > Updating libarchive ..."
+ dnf ${opts} install libarchive
+
+ dnf makecache --refresh
+}
+
+enable_powertools_repo() {
+ if ! dnf repolist | grep -q powertools; then
+ cat > /etc/yum.repos.d/powertools.repo <<-EOF
+ [powertools]
+ name=Rocky Linux \$releasever - PowerTools
+ mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=\$basearch&repo=PowerTools-\$releasever
+ #baseurl=http://dl.rockylinux.org/\$contentdir/\$releasever/PowerTools/\$basearch/os/
+ gpgcheck=1
+ enabled=1
+ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
+EOF
+ else
+ echo "Something went wrong!"
+ exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+validate_tree_rockylinux
+
+packages_to_install=
+
+# shellcheck disable=SC2068
+for package in ${package_tree[@]}; do
+ if rpm -q "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '$package' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z $packages_to_install ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "${packages_to_install[@]}"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+ # shellcheck disable=SC2068
+ dnf install ${opts} ${packages_to_install[@]}
+fi
diff --git a/packaging/installer/dependencies/sabayon.sh b/packaging/installer/dependencies/sabayon.sh
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/packaging/installer/dependencies/sabayon.sh
diff --git a/packaging/installer/dependencies/ubuntu.sh b/packaging/installer/dependencies/ubuntu.sh
new file mode 100755
index 00000000..e223ca38
--- /dev/null
+++ b/packaging/installer/dependencies/ubuntu.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+# Package tree used for installing netdata on distribution:
+# << Ubuntu: [18.04] [20.04] [20.10] [21.04] [21.10] >> | << Linux Mint >>
+
+set -e
+
+NON_INTERACTIVE=0
+DONT_WAIT=0
+
+package_tree="
+ autoconf
+ autoconf-archive
+ autogen
+ automake
+ bison
+ cmake
+ curl
+ flex
+ g++
+ gcc
+ git
+ gzip
+ libatomic1
+ libelf-dev
+ libjson-c-dev
+ liblz4-dev
+ libmnl-dev
+ libssl-dev
+ libsystemd-dev
+ libtool
+ libuv1-dev
+ libyaml-dev
+ make
+ pkg-config
+ python3
+ tar
+ uuid-dev
+ zlib1g-dev
+ "
+usage() {
+ cat << EOF
+OPTIONS:
+[--dont-wait] [--non-interactive] [ ]
+EOF
+}
+
+check_flags() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+}
+
+# shellcheck disable=SC2068
+check_flags ${@}
+
+packages_to_install=
+
+for package in $package_tree; do
+ if dpkg -s "$package" &> /dev/null; then
+ echo "Package '${package}' is installed"
+ else
+ echo "Package '${package}' is NOT installed"
+ packages_to_install="$packages_to_install $package"
+ fi
+done
+
+if [[ -z "$packages_to_install" ]]; then
+ echo "All required packages are already installed. Skipping .."
+else
+ echo "packages_to_install:" "$packages_to_install"
+ opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ export DEBIAN_FRONTEND="noninteractive"
+ opts="${opts} -yq"
+ fi
+ echo "Running apt-get update and updating your APT caches ..."
+ apt-get update
+ # shellcheck disable=SC2086
+ apt-get install ${opts} $packages_to_install
+fi
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh
new file mode 100644
index 00000000..dd3158d6
--- /dev/null
+++ b/packaging/installer/functions.sh
@@ -0,0 +1,1080 @@
+#!/bin/sh
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# next unused error code: L0003
+
+# make sure we have a UID
+[ -z "${UID}" ] && UID="$(id -u)"
+# -----------------------------------------------------------------------------
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_BLACK=""
+ TPUT_RED=""
+ TPUT_GREEN=""
+ TPUT_YELLOW=""
+ TPUT_BLUE=""
+ TPUT_PURPLE=""
+ TPUT_CYAN=""
+ TPUT_WHITE=""
+ TPUT_BGBLACK=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BGYELLOW=""
+ TPUT_BGBLUE=""
+ TPUT_BGPURPLE=""
+ TPUT_BGCYAN=""
+ TPUT_BGWHITE=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+ TPUT_UNDERLINED=""
+ TPUT_BLINK=""
+ TPUT_INVERTED=""
+ TPUT_STANDOUT=""
+ TPUT_BELL=""
+ TPUT_CLEAR=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput 1> /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ # shellcheck disable=SC2034
+ TPUT_BLACK="$(tput setaf 0)"
+ # shellcheck disable=SC2034
+ TPUT_RED="$(tput setaf 1)"
+ TPUT_GREEN="$(tput setaf 2)"
+ # shellcheck disable=SC2034
+ TPUT_YELLOW="$(tput setaf 3)"
+ # shellcheck disable=SC2034
+ TPUT_BLUE="$(tput setaf 4)"
+ # shellcheck disable=SC2034
+ TPUT_PURPLE="$(tput setaf 5)"
+ # shellcheck disable=SC2034
+ TPUT_CYAN="$(tput setaf 6)"
+ TPUT_WHITE="$(tput setaf 7)"
+ # shellcheck disable=SC2034
+ TPUT_BGBLACK="$(tput setab 0)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ # shellcheck disable=SC2034
+ TPUT_BGYELLOW="$(tput setab 3)"
+ # shellcheck disable=SC2034
+ TPUT_BGBLUE="$(tput setab 4)"
+ # shellcheck disable=SC2034
+ TPUT_BGPURPLE="$(tput setab 5)"
+ # shellcheck disable=SC2034
+ TPUT_BGCYAN="$(tput setab 6)"
+ # shellcheck disable=SC2034
+ TPUT_BGWHITE="$(tput setab 7)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ # shellcheck disable=SC2034
+ TPUT_UNDERLINED="$(tput smul)"
+ # shellcheck disable=SC2034
+ TPUT_BLINK="$(tput blink)"
+ # shellcheck disable=SC2034
+ TPUT_INVERTED="$(tput rev)"
+ # shellcheck disable=SC2034
+ TPUT_STANDOUT="$(tput smso)"
+ # shellcheck disable=SC2034
+ TPUT_BELL="$(tput bel)"
+ # shellcheck disable=SC2034
+ TPUT_CLEAR="$(tput clear)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+check_for_curl() {
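+  # Look for curl on the PATH, also checking /opt/netdata/bin where static installs may bundle their own copy.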
+ if [ -z "${curl}" ]; then
+ curl="$(PATH="${PATH}:/opt/netdata/bin" command -v curl 2>/dev/null && true)"
+ fi
+}
+
+get() {
+ url="${1}"
+
+ check_for_curl
+
+ if [ -n "${curl}" ]; then
+ "${curl}" -q -o - -sSL --connect-timeout 10 --retry 3 "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ wget -T 15 -O - "${url}"
+ else
+ fatal "I need curl or wget to proceed, but neither is available on this system." "L0002"
+ fi
+}
+
+download_file() {
+ url="${1}"
+ dest="${2}"
+ name="${3}"
+ opt="${4}"
+
+ check_for_curl
+
+ if [ -n "${curl}" ]; then
+ run "${curl}" -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ run wget -T 15 -O "${dest}" "${url}"
+ else
+ echo >&2
+ echo >&2 "Downloading ${name} from '${url}' failed because of missing mandatory packages."
+ if [ -n "$opt" ]; then
+ echo >&2 "Either add packages or disable it by issuing '--disable-${opt}' in the installer"
+ fi
+ echo >&2
+
+ run_failed "I need curl or wget to proceed, but neither is available on this system."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# external component handling
+
+fetch_and_verify() {
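+  # Download (or copy from an override path) an external component archive and verify it against the checksums shipped in the source tree.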
+ component="${1}"
+ url="${2}"
+ base_name="${3}"
+ tmp="${4}"
+ override="${5}"
+
+ if [ -z "${override}" ]; then
+ download_file "${url}" "${tmp}/${base_name}" "${component}"
+ else
+ progress "Using provided ${component} archive ${override}"
+ run cp "${override}" "${tmp}/${base_name}"
+ fi
+
+ if [ ! -f "${tmp}/${base_name}" ] || [ ! -s "${tmp}/${base_name}" ]; then
+ run_failed "Unable to find usable archive for ${component}"
+ return 1
+ fi
+
+ grep "${base_name}\$" "${INSTALLER_DIR}/packaging/${component}.checksums" > "${tmp}/sha256sums.txt" 2> /dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+ run_failed "${component} files checksum validation failed."
+ return 1
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+netdata_banner() {
+ l1=" ^" \
+ l2=" |.-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-" \
+ l4=" +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->" \
+ space=" "
+ l3f=" | '-' '-' '-' '-' '-'"
+ l3e=" '-' '-' '-' '-' '-' "
+
+ netdata="netdata"
+ chartcolor="${TPUT_DIM}"
+
+ echo >&2
+ echo >&2 "${chartcolor}${l1}${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l2%-. .-. .-. .-. .-. .-. .-. .-}${space}${TPUT_RESET}${TPUT_BOLD}${TPUT_GREEN}${netdata}${TPUT_RESET}${chartcolor}${l2# |.-. .-. .-. .-. .-. .-. .-. }${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l3f}${l3e}${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l4}${TPUT_RESET}"
+ echo >&2
+}
+
+# -----------------------------------------------------------------------------
+# portable service command
+
+service_cmd="$(command -v service 2> /dev/null || true)"
+rcservice_cmd="$(command -v rc-service 2> /dev/null || true)"
+systemctl_cmd="$(command -v systemctl 2> /dev/null || true)"
+service() {
+
+ cmd="${1}"
+ action="${2}"
+
+ if [ -n "${systemctl_cmd}" ]; then
+ run "${systemctl_cmd}" "${action}" "${cmd}"
+ return $?
+ elif [ -n "${service_cmd}" ]; then
+ run "${service_cmd}" "${cmd}" "${action}"
+ return $?
+ elif [ -n "${rcservice_cmd}" ]; then
+ run "${rcservice_cmd}" "${cmd}" "${action}"
+ return $?
+ fi
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# portable pidof
+
+safe_pidof() {
+ pidof_cmd="$(command -v pidof 2> /dev/null)"
+ if [ -n "${pidof_cmd}" ]; then
+ ${pidof_cmd} "${@}"
+ return $?
+ else
+ ps -acxo pid,comm |
+ sed "s/^ *//g" |
+ grep netdata |
+ cut -d ' ' -f 1
+ return $?
+ fi
+}
+
+# -----------------------------------------------------------------------------
+find_processors() {
+ # Most UNIX systems have `nproc` as part of their userland (including Linux and BSD)
+ if command -v nproc > /dev/null; then
+ nproc && return
+ fi
+
+  # macOS has no nproc but it may have gnproc installed from Homebrew or from MacPorts.
+ if command -v gnproc > /dev/null; then
+ gnproc && return
+ fi
+
+ if [ -f "/proc/cpuinfo" ]; then
+ # linux
+ cpus=$(grep -c ^processor /proc/cpuinfo)
+ else
+ # freebsd
+ cpus=$(sysctl hw.ncpu 2> /dev/null | grep ^hw.ncpu | cut -d ' ' -f 2)
+ fi
+ if [ -z "${cpus}" ] || [ $((cpus)) -lt 1 ]; then
+ echo 1
+ else
+ echo "${cpus}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+exit_reason() {
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ EXIT_REASON="${1}"
+ EXIT_CODE="${2}"
+ if [ -n "${NETDATA_PROPAGATE_WARNINGS}" ]; then
+ if [ -n "${NETDATA_SCRIPT_STATUS_PATH}" ]; then
+ {
+ echo "EXIT_REASON=\"${EXIT_REASON}\""
+ echo "EXIT_CODE=\"${EXIT_CODE}\""
+ echo "NETDATA_WARNINGS=\"${NETDATA_WARNINGS}${SAVED_WARNINGS}\""
+ } >> "${NETDATA_SCRIPT_STATUS_PATH}"
+ else
+ export EXIT_REASON
+ export EXIT_CODE
+ export NETDATA_WARNINGS="${NETDATA_WARNINGS}${SAVED_WARNINGS}"
+ fi
+ fi
+ fi
+}
+
+fatal() {
+ printf >&2 "%s ABORTED %s %s \n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ SAVED_WARNINGS="${SAVED_WARNINGS}\n - ${1}"
+ fi
+ exit_reason "${1}" "${2}"
+ exit 1
+}
+
+warning() {
+ printf >&2 "%s WARNING %s %s\n\n" "${TPUT_BGYELLOW}${TPUT_BLACK}${TPUT_BOLD}" "${TPUT_RESET}" "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ SAVED_WARNINGS="${SAVED_WARNINGS}\n - ${1}"
+ fi
+}
+
+run_ok() {
+ printf >&2 "%s OK %s %s\n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${1:-''}"
+}
+
+run_failed() {
+ printf >&2 "%s FAILED %s %s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${1:-''}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ] && [ -n "${1:-''}" ]; then
+ SAVED_WARNINGS="${SAVED_WARNINGS}\n - ${1}"
+ fi
+}
+
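+# When printf supports the repeated "%s " format, escaped_print emits each argument separately so logged commands keep their word boundaries.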
+ESCAPED_PRINT_METHOD=
+if printf "%s " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%s " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
+run() {
+ local_user="${USER--}"
+ local_dir="${PWD}"
+ if [ "${UID}" = "0" ]; then
+ info="[root ${local_dir}]# "
+ info_console="[${TPUT_DIM}${local_dir}${TPUT_RESET}]# "
+ else
+ info="[${local_user} ${local_dir}]$ "
+ info_console="[${TPUT_DIM}${local_dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "%s" "${info}"
+ escaped_print "${@}"
+ printf "%s" " ... "
+ } >> "${run_logfile}"
+
+ printf >&2 "%s" "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+ printf >&2 "%s\n" "${TPUT_RESET}"
+
+ "${@}"
+
+ ret=$?
+ if [ ${ret} -ne 0 ]; then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code %s\n" "${ret}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ SAVED_WARNINGS="${SAVED_WARNINGS}\n - Command '${*}' failed with exit code ${ret}."
+ fi
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+iscontainer() {
+ # man systemd-detect-virt
+ cmd=$(command -v systemd-detect-virt 2> /dev/null)
+ if [ -n "${cmd}" ] && [ -x "${cmd}" ]; then
+ "${cmd}" --container > /dev/null 2>&1 && return 0
+ fi
+
+  # /proc/1/sched exposes the host-side PID of our init process!
+ # http://stackoverflow.com/a/37016302
+ pid=$(head -n 1 /proc/1/sched 2> /dev/null | {
+ # shellcheck disable=SC2034
+ IFS='(),#:' read -r name pid th threads
+ echo "$pid"
+ })
+ if [ -n "${pid}" ]; then
+ pid=$((pid + 0))
+ [ ${pid} -gt 1 ] && return 0
+ fi
+
+ # lxc sets environment variable 'container'
+ # shellcheck disable=SC2154
+ [ -n "${container}" ] && return 0
+
+ # docker creates /.dockerenv
+ # http://stackoverflow.com/a/25518345
+ [ -f "/.dockerenv" ] && return 0
+
+ # ubuntu and debian supply /bin/running-in-container
+ # https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/upstart/1.12.1-0ubuntu4/file/bin/running-in-container
+ if [ -x "/bin/running-in-container" ]; then
+ "/bin/running-in-container" > /dev/null 2>&1 && return 0
+ fi
+
+ return 1
+}
+
+get_os_key() {
+ if [ -f /etc/os-release ]; then
+ # shellcheck disable=SC1091
+ . /etc/os-release || return 1
+ echo "${ID}-${VERSION_ID}"
+
+ elif [ -f /etc/redhat-release ]; then
+ cat /etc/redhat-release
+ else
+ echo "unknown"
+ fi
+}
+
+get_group(){
+ if command -v getent > /dev/null 2>&1; then
+ getent group "${1:-""}"
+ else
+ grep "^${1}:" /etc/group
+ fi
+}
+
+issystemd() {
+ pids=''
+ p=''
+ myns=''
+ ns=''
+ systemctl=''
+
+  # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exist, it is not systemd
+ if [ ! -d /lib/systemd/system ] && [ ! -d /usr/lib/systemd/system ]; then
+ return 1
+ fi
+
+ # if there is no systemctl command, it is not systemd
+ systemctl=$(command -v systemctl 2> /dev/null)
+ if [ -z "${systemctl}" ] || [ ! -x "${systemctl}" ]; then
+ return 1
+ fi
+
+ # if pid 1 is systemd, it is systemd
+ [ "$(basename "$(readlink /proc/1/exe)" 2> /dev/null)" = "systemd" ] && return 0
+
+ # if systemd is not running, it is not systemd
+ pids=$(safe_pidof systemd 2> /dev/null)
+ [ -z "${pids}" ] && return 1
+
+ # check if the running systemd processes are not in our namespace
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+ for p in ${pids}; do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ # if pid of systemd is in our namespace, it is systemd
+ [ -n "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
+ done
+
+ # else, it is not systemd
+ return 1
+}
+
+get_systemd_service_dir() {
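+  # Print the first writable systemd unit directory, preferring the distribution locations over /etc/systemd/system.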
+ if [ -w "/lib/systemd/system" ]; then
+ echo "/lib/systemd/system"
+ elif [ -w "/usr/lib/systemd/system" ]; then
+ echo "/usr/lib/systemd/system"
+ elif [ -w "/etc/systemd/system" ]; then
+ echo "/etc/systemd/system"
+ fi
+}
+
+install_non_systemd_init() {
+ [ "${UID}" != 0 ] && return 1
+ key="$(get_os_key)"
+
+ if [ -d /etc/init.d ] && [ ! -f /etc/init.d/netdata ]; then
+ if expr "${key}" : "^(gentoo|alpine).*"; then
+ echo >&2 "Installing OpenRC init file..."
+ run cp system/openrc/init.d/netdata /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run rc-update add netdata default &&
+ return 0
+
+ elif expr "${key}" : "^devuan*" || [ "${key}" = "debian-7" ] || [ "${key}" = "ubuntu-12.04" ] || [ "${key}" = "ubuntu-14.04" ]; then
+ echo >&2 "Installing LSB init file..."
+ run cp system/lsb/init.d/netdata /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run update-rc.d netdata defaults &&
+ run update-rc.d netdata enable &&
+ return 0
+ elif expr "${key}" : "^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).*"; then
+ echo >&2 "Installing init.d file..."
+ run cp system/initd/init.d/netdata /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run chkconfig netdata on &&
+ return 0
+ else
+ warning "Could not determine what type of init script to install on this system."
+ return 1
+ fi
+ elif [ -f /etc/init.d/netdata ]; then
+ echo >&2 "file '/etc/init.d/netdata' already exists."
+ return 0
+ else
+ warning "Could not determine what type of init script to install on this system."
+ fi
+
+ return 1
+}
+
+run_install_service_script() {
+ if [ -z "${tmpdir}" ]; then
+ tmpdir="${TMPDIR:-/tmp}"
+ fi
+
+ # shellcheck disable=SC2154
+ save_path="${tmpdir}/netdata-service-cmds"
+ # shellcheck disable=SC2068
+ "${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh" --save-cmds "${save_path}" ${@}
+
+ case $? in
+ 0)
+ if [ -r "${save_path}" ]; then
+ # shellcheck disable=SC1090
+ . "${save_path}"
+ fi
+
+ if [ -z "${NETDATA_INSTALLER_START_CMD}" ]; then
+ if [ -n "${NETDATA_START_CMD}" ]; then
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+ else
+ NETDATA_INSTALLER_START_CMD="netdata"
+ fi
+ fi
+ ;;
+ 1)
+ if [ -z "${NETDATA_SERVICE_WARNED_1}" ]; then
+        warning "Internal error encountered while attempting to install or manage Netdata as a system service. This is probably a bug."
+ NETDATA_SERVICE_WARNED_1=1
+ fi
+ ;;
+ 2)
+ if [ -z "${NETDATA_SERVICE_WARNED_2}" ]; then
+ warning "Failed to detect system service manager type. Cannot cleanly install or manage Netdata as a system service. If you are running this script in a container, this is expected and can safely be ignored."
+ NETDATA_SERVICE_WARNED_2=1
+ fi
+ ;;
+ 3)
+ if [ -z "${NETDATA_SERVICE_WARNED_3}" ]; then
+ warning "Detected an unsupported system service manager. Manual setup will be required to manage Netdata as a system service."
+ NETDATA_SERVICE_WARNED_3=1
+ fi
+ ;;
+ 4)
+ if [ -z "${NETDATA_SERVICE_WARNED_4}" ]; then
+ warning "Detected a supported system service manager, but failed to install Netdata as a system service. Usually this is a result of incorrect permissions. Manually running ${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh may provide more information about the exact issue."
+ NETDATA_SERVICE_WARNED_4=1
+ fi
+ ;;
+ 5)
+ if [ -z "${NETDATA_SERVICE_WARNED_5}" ]; then
+ warning "We do not support managing Netdata as a system service on this platform. Manual setup will be required."
+ NETDATA_SERVICE_WARNED_5=1
+ fi
+ ;;
+ esac
+}
+
+install_netdata_service() {
+ if [ "${UID}" -eq 0 ]; then
+ if [ -x "${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh" ]; then
+ run_install_service_script && return 0
+ else
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="netdatacli shutdown-agent"
+
+ NETDATA_START_CMD="netdata"
+ NETDATA_INSTALLER_START_CMD=""
+
+ uname="$(uname 2> /dev/null)"
+
+ if [ "${uname}" = "Darwin" ]; then
+ if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ]; then
+ echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists."
+ return 0
+ else
+          echo >&2 "Installing macOS plist file..."
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ run cp system/launchd/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist &&
+ run launchctl load /Library/LaunchDaemons/com.github.netdata.plist &&
+ NETDATA_START_CMD="launchctl start com.github.netdata" &&
+ NETDATA_STOP_CMD="launchctl stop com.github.netdata"
+ return 0
+ fi
+
+ elif [ "${uname}" = "FreeBSD" ]; then
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ run cp system/freebsd/rc.d/netdata /etc/rc.d/netdata && NETDATA_START_CMD="service netdata start" &&
+ NETDATA_STOP_CMD="service netdata stop" &&
+ NETDATA_INSTALLER_START_CMD="service netdata onestart" &&
+ myret=$?
+
+ echo >&2 "Note: To explicitly enable netdata automatic start, set 'netdata_enable' to 'YES' in /etc/rc.conf"
+ echo >&2 ""
+
+ return "${myret}"
+
+ elif issystemd; then
+ # systemd is running on this system
+ NETDATA_START_CMD="systemctl start netdata"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="systemctl stop netdata"
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+
+ SYSTEMD_DIRECTORY="$(get_systemd_service_dir)"
+
+ if [ "${SYSTEMD_DIRECTORY}x" != "x" ]; then
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="run systemctl enable netdata"
+ IS_NETDATA_ENABLED="$(systemctl is-enabled netdata 2> /dev/null || echo "Netdata not there")"
+ if [ "${IS_NETDATA_ENABLED}" = "disabled" ]; then
+ echo >&2 "Netdata was there and disabled, make sure we don't re-enable it ourselves"
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="true"
+ fi
+
+ echo >&2 "Installing systemd service..."
+ run cp system/systemd/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" &&
+ run systemctl daemon-reload &&
+ ${ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED} &&
+ return 0
+ else
+ warning "Could not find a systemd service directory, unable to install Netdata systemd service."
+ fi
+ else
+ install_non_systemd_init
+ ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ if [ -n "${service_cmd}" ]; then
+ NETDATA_START_CMD="service netdata start"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="service netdata stop"
+ elif [ -n "${rcservice_cmd}" ]; then
+ NETDATA_START_CMD="rc-service netdata start"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="rc-service netdata stop"
+ fi
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+ fi
+
+ return ${ret}
+ fi
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# stop netdata
+
+pidisnetdata() {
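+  # Confirm through /proc that the given PID belongs to a process named 'netdata'; without procfs we cannot tell, so assume it does.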
+ if [ -d /proc/self ]; then
+ if [ -z "$1" ] || [ ! -f "/proc/$1/stat" ]; then
+ return 1
+ fi
+ [ "$(cut -d '(' -f 2 "/proc/$1/stat" | cut -d ')' -f 1)" = "netdata" ] && return 0
+ return 1
+ fi
+ return 0
+}
+
+stop_netdata_on_pid() {
+ pid="${1}"
+ ret=0
+ count=0
+
+ pidisnetdata "${pid}" || return 0
+
+ printf >&2 "Stopping netdata on pid %s ..." "${pid}"
+ while [ -n "${pid}" ] && [ ${ret} -eq 0 ]; do
+ if [ ${count} -gt 24 ]; then
+ warning "Cannot stop netdata agent with PID ${pid}."
+ return 1
+ fi
+
+ count=$((count + 1))
+
+ pidisnetdata "${pid}" || ret=1
+ if [ ${ret} -eq 1 ]; then
+ break
+ fi
+
+ if [ ${count} -lt 12 ]; then
+ run kill "${pid}" 2> /dev/null
+ ret=$?
+ else
+ run kill -9 "${pid}" 2> /dev/null
+ ret=$?
+ fi
+
+ test ${ret} -eq 0 && printf >&2 "." && sleep 5
+
+ done
+
+ echo >&2
+ if [ ${ret} -eq 0 ]; then
+ warning "Failed to stop netdata agent process with PID ${pid}."
+ return 1
+ fi
+
+ echo >&2 "netdata on pid ${pid} stopped."
+ return 0
+}
+
+netdata_pids() {
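+  # Collect candidate netdata PIDs from the known pid files and pidof, keeping only netdata processes in our PID namespace.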
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+
+ for p in \
+ $(cat /var/run/netdata.pid 2> /dev/null) \
+ $(cat /var/run/netdata/netdata.pid 2> /dev/null) \
+ $(safe_pidof netdata 2> /dev/null); do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ if [ -z "${myns}" ] || [ -z "${ns}" ] || [ "${myns}" = "${ns}" ]; then
+ pidisnetdata "${p}" && echo "${p}"
+ fi
+ done
+}
+
+stop_all_netdata() {
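+  # Try the service manager first; if that does not stop netdata, fall back to netdatacli and finally to signalling the PIDs directly.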
+ stop_success=0
+
+ if [ -x "${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh" ]; then
+ run_install_service_script --cmds-only
+ fi
+
+ if [ "${UID}" -eq 0 ]; then
+
+ uname="$(uname 2>/dev/null)"
+
+ # Any of these may fail, but we need to not bail if they do.
+ if [ -n "${NETDATA_STOP_CMD}" ]; then
+ if ${NETDATA_STOP_CMD}; then
+ stop_success=1
+ sleep 5
+ fi
+ elif issystemd; then
+ if systemctl stop netdata; then
+ stop_success=1
+ sleep 5
+ fi
+ elif [ "${uname}" = "Darwin" ]; then
+ if launchctl stop netdata; then
+ stop_success=1
+ sleep 5
+ fi
+ elif [ "${uname}" = "FreeBSD" ]; then
+ if /etc/rc.d/netdata stop; then
+ stop_success=1
+ sleep 5
+ fi
+ else
+ if service netdata stop; then
+ stop_success=1
+ sleep 5
+ fi
+ fi
+ fi
+
+ if [ "$stop_success" = "0" ]; then
+ if [ -n "$(netdata_pids)" ] && [ -n "$(command -v netdatacli)" ]; then
+ netdatacli shutdown-agent
+ sleep 20
+ fi
+
+ for p in $(netdata_pids); do
+ # shellcheck disable=SC2086
+ stop_netdata_on_pid ${p}
+ done
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# restart netdata
+
+restart_netdata() {
+ netdata="${1}"
+ shift
+
+ started=0
+
+ progress "Restarting netdata instance"
+
+ if [ -x "${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh" ]; then
+ run_install_service_script --cmds-only
+ fi
+
+ if [ -z "${NETDATA_INSTALLER_START_CMD}" ]; then
+ if [ -n "${NETDATA_START_CMD}" ]; then
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+ else
+ NETDATA_INSTALLER_START_CMD="${netdata}"
+ fi
+ fi
+
+ if [ "${UID}" -eq 0 ]; then
+ echo >&2
+ echo >&2 "Stopping all netdata threads"
+ run stop_all_netdata
+
+ echo >&2 "Starting netdata using command '${NETDATA_INSTALLER_START_CMD}'"
+ # shellcheck disable=SC2086
+ run ${NETDATA_INSTALLER_START_CMD} && started=1
+
+ if [ ${started} -eq 1 ] && sleep 5 && [ -z "$(netdata_pids)" ]; then
+      echo >&2 "Oops! It seems netdata did not start."
+ started=0
+ fi
+
+ if [ ${started} -eq 0 ]; then
+ echo >&2 "Attempting another netdata start using command '${NETDATA_INSTALLER_START_CMD}'"
+ # shellcheck disable=SC2086
+ run ${NETDATA_INSTALLER_START_CMD} && started=1
+ fi
+
+ if [ ${started} -eq 1 ] && sleep 5 && [ -z "$(netdata_pids)" ]; then
+      echo >&2 "Hmm... it seems netdata still did not start."
+ started=0
+ fi
+ fi
+
+
+ if [ ${started} -eq 0 ]; then
+ # still not started... another forced attempt, just run the binary
+ warning "Netdata service still not started, attempting another forced restart by running '${netdata} ${*}'"
+ run stop_all_netdata
+ run "${netdata}" "${@}"
+ return $?
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+# install netdata logrotate
+
+install_netdata_logrotate() {
+ src="${NETDATA_PREFIX}/usr/lib/netdata/system/logrotate/netdata"
+
+ if [ "${UID}" -eq 0 ]; then
+ if [ -d /etc/logrotate.d ]; then
+ if [ ! -f /etc/logrotate.d/netdata ]; then
+ run cp "${src}" /etc/logrotate.d/netdata
+ fi
+
+ if [ -f /etc/logrotate.d/netdata ]; then
+ run chmod 644 /etc/logrotate.d/netdata
+ fi
+
+ return 0
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# create netdata.conf
+
+create_netdata_conf() {
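+  # Keep an existing netdata.conf; otherwise try to download one from the running daemon, falling back to a short stub with instructions.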
+ path="${1}"
+ url="${2}"
+
+ if [ -s "${path}" ]; then
+ return 0
+ fi
+
+ if [ -n "$url" ]; then
+ echo >&2 "Downloading default configuration from netdata..."
+ sleep 5
+
+ # remove a possibly obsolete configuration file
+ [ -f "${path}.new" ] && rm "${path}.new"
+
+ # disable a proxy to get data from the local netdata
+ export http_proxy=
+ export https_proxy=
+
+ check_for_curl
+
+ if [ -n "${curl}" ]; then
+ run "${curl}" -sSL --connect-timeout 10 --retry 3 "${url}" > "${path}.new"
+ elif command -v wget 1> /dev/null 2>&1; then
+ run wget -T 15 -O - "${url}" > "${path}.new"
+ fi
+
+ if [ -s "${path}.new" ]; then
+ run mv "${path}.new" "${path}"
+ run_ok "New configuration saved for you to edit at ${path}"
+ else
+ [ -f "${path}.new" ] && rm "${path}.new"
+ run_failed "Cannot download configuration from netdata daemon using url '${url}'"
+ url=''
+ fi
+ fi
+
+ if [ -z "$url" ]; then
+ cat << EOF > "${path}"
+# netdata can generate its own config which is available at 'http://<IP>:19999/netdata.conf'
+# You can download it using:
+# curl -o ${path} http://localhost:19999/netdata.conf
+# or
+# wget -O ${path} http://localhost:19999/netdata.conf
+EOF
+ fi
+
+}
+
+portable_add_user() {
+ username="${1}"
+ homedir="${2}"
+
+ [ -z "${homedir}" ] && homedir="/tmp"
+
+ # Check if user exists
+ if command -v getent > /dev/null 2>&1; then
+ if getent passwd "${username}" > /dev/null 2>&1; then
+ echo >&2 "User '${username}' already exists."
+ return 0
+ fi
+ else
+ if cut -d ':' -f 1 < /etc/passwd | grep "^${username}$" 1> /dev/null 2>&1; then
+ echo >&2 "User '${username}' already exists."
+ return 0
+ fi
+ fi
+
+ echo >&2 "Adding ${username} user account with home ${homedir} ..."
+
+ nologin="$(command -v nologin || echo '/bin/false')"
+
+ if command -v useradd 1> /dev/null 2>&1; then
+ run useradd -r -g "${username}" -c "${username}" -s "${nologin}" --no-create-home -d "${homedir}" "${username}" && return 0
+ elif command -v pw 1> /dev/null 2>&1; then
+ run pw useradd "${username}" -d "${homedir}" -g "${username}" -s "${nologin}" && return 0
+ elif command -v adduser 1> /dev/null 2>&1; then
+ run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0
+ elif command -v sysadminctl 1> /dev/null 2>&1; then
+ run sysadminctl -addUser "${username}" && return 0
+ fi
+
+ warning "Failed to add ${username} user account!"
+
+ return 1
+}
+
+portable_add_group() {
+ groupname="${1}"
+
+  # Check if group exists
+ if get_group "${groupname}" > /dev/null 2>&1; then
+ echo >&2 "Group '${groupname}' already exists."
+ return 0
+ fi
+
+ echo >&2 "Adding ${groupname} user group ..."
+
+ # Linux
+ if command -v groupadd 1> /dev/null 2>&1; then
+ run groupadd -r "${groupname}" && return 0
+ elif command -v pw 1> /dev/null 2>&1; then
+ run pw groupadd "${groupname}" && return 0
+ elif command -v addgroup 1> /dev/null 2>&1; then
+ run addgroup "${groupname}" && return 0
+ elif command -v dseditgroup 1> /dev/null 2>&1; then
+ dseditgroup -o create "${groupname}" && return 0
+ fi
+
+  warning >&2 "Failed to add ${groupname} user group!"
+ return 1
+}
+
+portable_add_user_to_group() {
+ groupname="${1}"
+ username="${2}"
+
+  # Check if group exists
+ if ! get_group "${groupname}" > /dev/null 2>&1; then
+ echo >&2 "Group '${groupname}' does not exist."
+    # Don't treat this as a failure; if the group does not exist, we should not be trying to add the user to it.
+ return 0
+ fi
+
+ # Check if user is in group
+ if get_group "${groupname}" | cut -d ':' -f 4 | grep -wq "${username}"; then
+ # username is already there
+ echo >&2 "User '${username}' is already in group '${groupname}'."
+ return 0
+ else
+ # username is not in group
+ echo >&2 "Adding ${username} user to the ${groupname} group ..."
+
+ # Linux
+ if command -v usermod 1> /dev/null 2>&1; then
+ run usermod -a -G "${groupname}" "${username}" && return 0
+ elif command -v pw 1> /dev/null 2>&1; then
+ run pw groupmod "${groupname}" -m "${username}" && return 0
+ elif command -v addgroup 1> /dev/null 2>&1; then
+ run addgroup "${username}" "${groupname}" && return 0
+ elif command -v dseditgroup 1> /dev/null 2>&1; then
+ dseditgroup -u "${username}" "${groupname}" && return 0
+ fi
+
+ warning >&2 "Failed to add user ${username} to group ${groupname}!"
+ return 1
+ fi
+}
+
+safe_sha256sum() {
+ # Within the context of the installer, we only use the -c option, which is common to both commands.
+ # We will have to reconsider if we start using non-common options.
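+ # Typical use here is checksum verification of downloaded files, e.g.: safe_sha256sum -c <checksums-file>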
+ if command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use" "L0001"
+ fi
+}
+
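+# Pick the mechanism used to schedule periodic jobs: a daily cron directory ('interval'),
+# a systemd timer ('systemd'), or /etc/cron.d ('crontab'), preferred in that order.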
+_get_scheduler_type() {
+ if _get_intervaldir > /dev/null ; then
+ echo 'interval'
+ elif issystemd ; then
+ echo 'systemd'
+ elif [ -d /etc/cron.d ] ; then
+ echo 'crontab'
+ else
+ echo 'none'
+ fi
+}
+
+_get_intervaldir() {
+ if [ -d /etc/cron.daily ]; then
+ echo /etc/cron.daily
+ elif [ -d /etc/periodic/daily ]; then
+ echo /etc/periodic/daily
+ else
+ return 1
+ fi
+
+ return 0
+}
+
+install_netdata_updater() {
+ if [ "${INSTALLER_DIR}" ] && [ -f "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" ]; then
+ cat "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+ fi
+
+ if [ "${NETDATA_SOURCE_DIR}" ] && [ -f "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" ]; then
+ cat "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+ fi
+
+ if issystemd && [ -n "$(get_systemd_service_dir)" ]; then
+ cat "${NETDATA_SOURCE_DIR}/system/systemd/netdata-updater.timer" > "$(get_systemd_service_dir)/netdata-updater.timer"
+ cat "${NETDATA_SOURCE_DIR}/system/systemd/netdata-updater.service" > "$(get_systemd_service_dir)/netdata-updater.service"
+ fi
+
+ sed -i -e "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+
+ chmod 0755 "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh"
+ echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh${TPUT_RESET}"
+ echo >&2
+
+ return 0
+}
+
+set_netdata_updater_channel() {
+ sed -i -e "s/^RELEASE_CHANNEL=.*/RELEASE_CHANNEL=\"${RELEASE_CHANNEL}\"/" "${NETDATA_USER_CONFIG_DIR}/.environment"
+}
diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh
new file mode 100755
index 00000000..bdd52939
--- /dev/null
+++ b/packaging/installer/install-required-packages.sh
@@ -0,0 +1,2112 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC2034
+# We use lots of computed variable names in here, so we need to disable shellcheck 2034
+
+export PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+export LC_ALL=C
+
+# Be nice on production environments
+renice 19 $$ > /dev/null 2> /dev/null
+
+ME="${0}"
+
+if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
+ echo >&2 "Sorry! This script needs BASH version 4+, but you have BASH version ${BASH_VERSION}"
+ exit 1
+fi
+
+# These options control which packages we are going to install
+# They can be pre-set, but can also be controlled with command-line options
+PACKAGES_NETDATA=${PACKAGES_NETDATA-1}
+PACKAGES_NETDATA_PYTHON=${PACKAGES_NETDATA_PYTHON-0}
+PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-1}
+PACKAGES_DEBUG=${PACKAGES_DEBUG-0}
+PACKAGES_IPRANGE=${PACKAGES_IPRANGE-0}
+PACKAGES_FIREHOL=${PACKAGES_FIREHOL-0}
+PACKAGES_FIREQOS=${PACKAGES_FIREQOS-0}
+PACKAGES_UPDATE_IPSETS=${PACKAGES_UPDATE_IPSETS-0}
+PACKAGES_NETDATA_DEMO_SITE=${PACKAGES_NETDATA_DEMO_SITE-0}
+PACKAGES_NETDATA_SENSORS=${PACKAGES_NETDATA_SENSORS-0}
+PACKAGES_NETDATA_DATABASE=${PACKAGES_NETDATA_DATABASE-1}
+PACKAGES_NETDATA_STREAMING_COMPRESSION=${PACKAGES_NETDATA_STREAMING_COMPRESSION-0}
+PACKAGES_NETDATA_EBPF=${PACKAGES_NETDATA_EBPF-1}
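+# For example, running: PACKAGES_DEBUG=1 ./install-required-packages.sh netdata
+# would also pull in the debugging tools on top of the regular netdata set.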
+
+# needed commands
+lsb_release=$(command -v lsb_release 2> /dev/null)
+
+# Check which package managers are available
+apk=$(command -v apk 2> /dev/null)
+apt_get=$(command -v apt-get 2> /dev/null)
+brew=$(command -v brew 2> /dev/null)
+pkg=$(command -v pkg 2> /dev/null)
+dnf=$(command -v dnf 2> /dev/null)
+emerge=$(command -v emerge 2> /dev/null)
+equo=$(command -v equo 2> /dev/null)
+pacman=$(command -v pacman 2> /dev/null)
+swupd=$(command -v swupd 2> /dev/null)
+yum=$(command -v yum 2> /dev/null)
+zypper=$(command -v zypper 2> /dev/null)
+
+distribution=
+release=
+version=
+codename=
+package_installer=
+tree=
+detection=
+NAME=
+ID=
+ID_LIKE=
+VERSION=
+VERSION_ID=
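+# 'tree' groups related distributions under a common package-naming family
+# (e.g. ubuntu falls under the 'debian' tree, fedora under 'rhel'); the pkg_*
+# tables below are keyed by distribution, tree, or 'default'.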
+
+usage() {
+ cat << EOF
+OPTIONS:
+
+${ME} [--dont-wait] [--non-interactive] \\
+ [distribution DD [version VV] [codename CN]] [installer IN] [packages]
+
+Supported distributions (DD):
+
+ - arch (all Arch Linux derivatives)
+ - centos (all CentOS derivatives)
+ - gentoo (all Gentoo Linux derivatives)
+ - sabayon (all Sabayon Linux derivatives)
+ - debian, ubuntu (all Debian and Ubuntu derivatives)
+ - redhat, fedora (all Red Hat and Fedora derivatives)
+ - suse, opensuse (all SUSE and openSUSE derivatives)
+ - clearlinux (all Clear Linux derivatives)
+ - macos (Apple's macOS)
+
+Supported installers (IN):
+
+ - apt-get all Debian / Ubuntu Linux derivatives
+ - dnf newer Red Hat / Fedora Linux
+ - emerge all Gentoo Linux derivatives
+ - equo all Sabayon Linux derivatives
+ - pacman all Arch Linux derivatives
+ - yum all Red Hat / Fedora / CentOS Linux derivatives
+ - zypper all SUSE Linux derivatives
+ - apk all Alpine derivatives
+ - swupd all Clear Linux derivatives
+ - brew macOS Homebrew
+ - pkg FreeBSD Ports
+
+Supported packages (you can append many of them):
+
+ - netdata-all all packages required to install netdata
+ including python, sensors, etc
+
+ - netdata minimum packages required to install netdata
+ (includes python)
+
+ - python install python
+
+ - python3 install python3
+
+ - sensors install lm_sensors for monitoring h/w sensors
+
+ - firehol-all packages required for FireHOL, FireQOS, update-ipsets
+ - firehol packages required for FireHOL
+ - fireqos packages required for FireQOS
+ - update-ipsets packages required for update-ipsets
+
+ - demo packages required for running a netdata demo site
+ (includes nginx and various debugging tools)
+
+
+If you don't supply the --dont-wait option, the program
+will ask you before touching your system.
+
+EOF
+}
+
+release2lsb_release() {
+ # loads the given /etc/x-release file
+ # this file is normally a single line containing something like
+ #
+ # X Linux release 1.2.3 (release-name)
+ #
+ # It attempts to parse it
+ # If it succeeds, it returns 0
+ # otherwise it returns 1
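+ # e.g. 'CentOS Linux release 7.9.2009 (Core)' would yield
+ # DISTRIB_ID='CentOS', DISTRIB_RELEASE='7.9.2009', DISTRIB_CODENAME='Core'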
+
+ local file="${1}" x DISTRIB_ID="" DISTRIB_RELEASE="" DISTRIB_CODENAME=""
+ echo >&2 "Loading ${file} ..."
+
+ x="$(grep -v "^$" "${file}" | head -n 1)"
+
+ if [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")"
+ fi
+
+ distribution="${DISTRIB_ID}"
+ version="${DISTRIB_RELEASE}"
+ codename="${DISTRIB_CODENAME}"
+
+ [ -z "${distribution}" ] && echo >&2 "Cannot parse this lsb-release: ${x}" && return 1
+ detection="${file}"
+ return 0
+}
+
+get_os_release() {
+ # Loads the /etc/os-release or /usr/lib/os-release file(s)
+ # Only the required fields are loaded
+ #
+ # If it manages to load a valid os-release, it returns 0
+ # otherwise it returns 1
+ #
+ # It searches the ID_LIKE field for a compatible distribution
+
+ os_release_file=
+ if [ -s "/etc/os-release" ]; then
+ os_release_file="/etc/os-release"
+ elif [ -s "/usr/lib/os-release" ]; then
+ os_release_file="/usr/lib/os-release"
+ else
+ echo >&2 "Cannot find an os-release file ..."
+ return 1
+ fi
+
+ local x
+ echo >&2 "Loading ${os_release_file} ..."
+
+ eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" "${os_release_file}")"
+ for x in "${ID}" ${ID_LIKE}; do
+ case "${x,,}" in
+ almalinux | alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | opensuse-tumbleweed | ol | rhel | rocky | sabayon | sles | suse | ubuntu)
+ distribution="${x}"
+ if [[ "${ID}" = "opensuse-tumbleweed" ]]; then
+ version="tumbleweed"
+ codename="tumbleweed"
+ else
+ version="${VERSION_ID}"
+ codename="${VERSION}"
+ fi
+ detection="${os_release_file}"
+ break
+ ;;
+ *)
+ echo >&2 "Unknown distribution ID: ${x}"
+ ;;
+ esac
+ done
+ [[ -z "${distribution}" ]] && echo >&2 "Cannot find valid distribution in: \
+${ID} ${ID_LIKE}" && return 1
+
+ [[ -z "${distribution}" ]] && return 1
+ return 0
+}
+
+get_lsb_release() {
+ # Loads the /etc/lsb-release file
+ # If it fails, it attempts to run the command: lsb_release -a
+ # and parse its output
+ #
+ # If it manages to find the lsb-release, it returns 0
+ # otherwise it returns 1
+
+ if [ -f "/etc/lsb-release" ]; then
+ echo >&2 "Loading /etc/lsb-release ..."
+ local DISTRIB_ID="" DISTRIB_RELEASE="" DISTRIB_CODENAME=""
+ eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" /etc/lsb-release)"
+ distribution="${DISTRIB_ID}"
+ version="${DISTRIB_RELEASE}"
+ codename="${DISTRIB_CODENAME}"
+ detection="/etc/lsb-release"
+ fi
+
+ if [ -z "${distribution}" ] && [ -n "${lsb_release}" ]; then
+ echo >&2 "Cannot find distribution with /etc/lsb-release"
+ echo >&2 "Running command: lsb_release ..."
+ eval "declare -A release=( $(lsb_release -a 2> /dev/null | sed -e "s|^\(.*\):[[:space:]]*\(.*\)$|[\1]=\"\2\"|g") )"
+ distribution="${release["Distributor ID"]}"
+ version="${release[Release]}"
+ codename="${release[Codename]}"
+ detection="lsb_release"
+ fi
+
+ [ -z "${distribution}" ] && echo >&2 "Cannot find valid distribution with lsb-release" && return 1
+ return 0
+}
+
+find_etc_any_release() {
+ # Check for any of the known /etc/x-release files
+ # If it finds one, it loads it and returns 0
+ # otherwise it returns 1
+
+ if [ -f "/etc/arch-release" ]; then
+ release2lsb_release "/etc/arch-release" && return 0
+ fi
+
+ if [ -f "/etc/centos-release" ]; then
+ release2lsb_release "/etc/centos-release" && return 0
+ fi
+
+ if [ -f "/etc/redhat-release" ]; then
+ release2lsb_release "/etc/redhat-release" && return 0
+ fi
+
+ if [ -f "/etc/SuSe-release" ]; then
+ release2lsb_release "/etc/SuSe-release" && return 0
+ fi
+
+ return 1
+}
+
+autodetect_distribution() {
+ # autodetection of distribution/OS
+ case "$(uname -s)" in
+ "Linux")
+ get_os_release || get_lsb_release || find_etc_any_release
+ ;;
+ "FreeBSD")
+ distribution="freebsd"
+ version="$(uname -r)"
+ detection="uname"
+ ;;
+ "Darwin")
+ distribution="macos"
+ version="$(uname -r)"
+ detection="uname"
+
+ if [ ${EUID} -eq 0 ]; then
+ echo >&2 "This script does not support running as EUID 0 on macOS. Please run it as a regular user."
+ exit 1
+ fi
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+}
+
+user_picks_distribution() {
+ # let the user pick a distribution
+
+ echo >&2
+ echo >&2 "I NEED YOUR HELP"
+ echo >&2 "It seems I cannot detect your system automatically."
+
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ echo >&2 " > Bailing out..."
+ exit 1
+ fi
+
+ if [ -z "${equo}" ] && [ -z "${emerge}" ] && [ -z "${apt_get}" ] && [ -z "${yum}" ] && [ -z "${dnf}" ] && [ -z "${pacman}" ] && [ -z "${apk}" ] && [ -z "${swupd}" ]; then
+ echo >&2 "And it seems I cannot find a known package manager in this system."
+ echo >&2 "Please open a github issue to help us support your system too."
+ exit 1
+ fi
+
+ local opts=
+ echo >&2 "I found though that the following installers are available:"
+ echo >&2
+ [ -n "${apt_get}" ] && echo >&2 " - Debian/Ubuntu based (installer is: apt-get)" && opts="apt-get ${opts}"
+ [ -n "${yum}" ] && echo >&2 " - Red Hat/Fedora/CentOS based (installer is: yum)" && opts="yum ${opts}"
+ [ -n "${dnf}" ] && echo >&2 " - Red Hat/Fedora/CentOS based (installer is: dnf)" && opts="dnf ${opts}"
+ [ -n "${zypper}" ] && echo >&2 " - SuSe based (installer is: zypper)" && opts="zypper ${opts}"
+ [ -n "${pacman}" ] && echo >&2 " - Arch Linux based (installer is: pacman)" && opts="pacman ${opts}"
+ [ -n "${emerge}" ] && echo >&2 " - Gentoo based (installer is: emerge)" && opts="emerge ${opts}"
+ [ -n "${equo}" ] && echo >&2 " - Sabayon based (installer is: equo)" && opts="equo ${opts}"
+ [ -n "${apk}" ] && echo >&2 " - Alpine Linux based (installer is: apk)" && opts="apk ${opts}"
+ [ -n "${swupd}" ] && echo >&2 " - Clear Linux based (installer is: swupd)" && opts="swupd ${opts}"
+ [ -n "${brew}" ] && echo >&2 " - macOS based (installer is: brew)" && opts="brew ${opts}"
+ # XXX: This is being removed in another PR.
+ echo >&2
+
+ REPLY=
+ while [ -z "${REPLY}" ]; do
+ echo "To proceed please write one of these:"
+ echo "${opts// /, }"
+ if ! read -r -p ">" REPLY; then
+ continue
+ fi
+
+ if [ "${REPLY}" = "yum" ] && [ -z "${distribution}" ]; then
+ REPLY=
+ while [ -z "${REPLY}" ]; do
+ if ! read -r -p "yum in centos, rhel, ol or fedora? > "; then
+ continue
+ fi
+
+ case "${REPLY,,}" in
+ fedora | rhel)
+ distribution="rhel"
+ ;;
+ ol)
+ distribution="ol"
+ ;;
+ centos)
+ distribution="centos"
+ ;;
+ *)
+ echo >&2 "Please enter 'centos', 'fedora', 'ol' or 'rhel'."
+ REPLY=
+ ;;
+ esac
+ done
+ REPLY="yum"
+ fi
+ check_package_manager "${REPLY}" || REPLY=
+ done
+}
+
+detect_package_manager_from_distribution() {
+ case "${1,,}" in
+ arch* | manjaro*)
+ package_installer="install_pacman"
+ tree="arch"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pacman}" ]; then
+ echo >&2 "command 'pacman' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ sabayon*)
+ package_installer="install_equo"
+ tree="sabayon"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${equo}" ]; then
+ echo >&2 "command 'equo' is required to install packages on a '${distribution} ${version}' system."
+ # Maybe offer to fall back on emerge? Both installers exist in Sabayon...
+ exit 1
+ fi
+ ;;
+
+ alpine*)
+ package_installer="install_apk"
+ tree="alpine"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apk}" ]; then
+ echo >&2 "command 'apk' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ gentoo*)
+ package_installer="install_emerge"
+ tree="gentoo"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${emerge}" ]; then
+ echo >&2 "command 'emerge' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ debian* | ubuntu*)
+ package_installer="install_apt_get"
+ tree="debian"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apt_get}" ]; then
+ echo >&2 "command 'apt-get' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ centos* | clearos* | rocky* | almalinux*)
+ package_installer=""
+ tree="centos"
+ [[ -n "${yum}" ]] && package_installer="install_yum"
+ [[ -n "${dnf}" ]] && package_installer="install_dnf"
+ if [[ "${IGNORE_INSTALLED}" -eq 0 ]] && [[ -z "${package_installer}" ]]; then
+ echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ fedora* | redhat* | red\ hat* | rhel*)
+ package_installer=
+ tree="rhel"
+ [[ -n "${yum}" ]] && package_installer="install_yum"
+ [[ -n "${dnf}" ]] && package_installer="install_dnf"
+ if [[ "${IGNORE_INSTALLED}" -eq 0 ]] && [[ -z "${package_installer}" ]]; then
+ echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ ol*)
+ package_installer=
+ tree="ol"
+ [ -n "${yum}" ] && package_installer="install_yum"
+ [ -n "${dnf}" ] && package_installer="install_dnf"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${package_installer}" ]; then
+ echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ suse* | opensuse* | sles*)
+ package_installer="install_zypper"
+ tree="suse"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${zypper}" ]; then
+ echo >&2 "command 'zypper' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ clear-linux* | clearlinux*)
+ package_installer="install_swupd"
+ tree="clearlinux"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${swupd}" ]; then
+ echo >&2 "command 'swupd' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ freebsd)
+ package_installer="install_pkg"
+ tree="freebsd"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pkg}" ]; then
+ echo >&2 "command 'pkg' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+ macos)
+ package_installer="install_brew"
+ tree="macos"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${brew}" ]; then
+ echo >&2 "command 'brew' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ *)
+ # oops! unknown system
+ user_picks_distribution
+ ;;
+ esac
+}
+
+# XXX: This is being removed in another PR.
+check_package_manager() {
+ # This is called only when the user is selecting a package manager.
+ # It is used to verify that the user's selection is valid.
+
+ echo >&2 "Checking package manager: ${1}"
+
+ case "${1}" in
+ apt-get)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apt_get}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_apt_get"
+ tree="debian"
+ detection="user-input"
+ return 0
+ ;;
+
+ dnf)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${dnf}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_dnf"
+ if [ "${distribution}" = "centos" ]; then
+ tree="centos"
+ elif [ "${distribution}" = "ol" ]; then
+ tree="ol"
+ else
+ tree="rhel"
+ fi
+ detection="user-input"
+ return 0
+ ;;
+
+ apk)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apk}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_apk"
+ tree="alpine"
+ detection="user-input"
+ return 0
+ ;;
+
+ equo)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${equo}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_equo"
+ tree="sabayon"
+ detection="user-input"
+ return 0
+ ;;
+
+ emerge)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${emerge}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_emerge"
+ tree="gentoo"
+ detection="user-input"
+ return 0
+ ;;
+
+ pacman)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pacman}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_pacman"
+ tree="arch"
+ detection="user-input"
+
+ return 0
+ ;;
+
+ zypper)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${zypper}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_zypper"
+ tree="suse"
+ detection="user-input"
+ return 0
+ ;;
+
+ yum)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${yum}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_yum"
+ if [ "${distribution}" = "centos" ]; then
+ tree="centos"
+ elif [ "${distribution}" = "ol" ]; then
+ tree="ol"
+ else
+ tree="rhel"
+ fi
+ detection="user-input"
+ return 0
+ ;;
+
+ swupd)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${swupd}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_swupd"
+ tree="clear-linux"
+ detection="user-input"
+ return 0
+ ;;
+
+ brew)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${brew}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_brew"
+ tree="macos"
+ detection="user-input"
+
+ return 0
+ ;;
+
+ *)
+ echo >&2 "Invalid package manager: '${1}'."
+ return 1
+ ;;
+ esac
+}
+
+require_cmd() {
+ # Check if any of the commands given as arguments
+ # are present on this system.
+ # If any of them is available, it returns 0,
+ # otherwise 1.
+
+ [ "${IGNORE_INSTALLED}" -eq 1 ] && return 1
+
+ local wanted found
+ for wanted in "${@}"; do
+ if command -v "${wanted}" > /dev/null 2>&1; then
+ found="$(command -v "$wanted" 2> /dev/null)"
+ fi
+ [ -n "${found}" ] && [ -x "${found}" ] && return 0
+ done
+ return 1
+}
+
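+# Package name maps: each pkg_* table maps a distribution, tree, or version-specific key
+# to the package name to install on that system. The special value NOTREQUIRED skips the
+# package, and a value of the form 'WARNING|<message>' only prints a warning (see suitable_package).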
+declare -A pkg_find=(
+ ['gentoo']="sys-apps/findutils"
+ ['fedora']="findutils"
+ ['clearlinux']="findutils"
+ ['rhel']="findutils"
+ ['centos']="findutils"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="WARNING|"
+)
+
+declare -A pkg_distro_sdk=(
+ ['alpine']="alpine-sdk"
+ ['default']="NOTREQUIRED"
+)
+
+declare -A pkg_autoconf=(
+ ['gentoo']="sys-devel/autoconf"
+ ['clearlinux']="c-basic"
+ ['default']="autoconf"
+)
+
+# required to compile netdata with --enable-sse
+# https://github.com/firehol/netdata/pull/450
+declare -A pkg_autoconf_archive=(
+ ['gentoo']="sys-devel/autoconf-archive"
+ ['clearlinux']="c-basic"
+ ['alpine']="WARNING|"
+ ['default']="autoconf-archive"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+ ['rhel-6']="WARNING|"
+ ['rhel-7']="WARNING|"
+)
+
+declare -A pkg_autogen=(
+ ['gentoo']="sys-devel/autogen"
+ ['clearlinux']="c-basic"
+ ['alpine']="WARNING|"
+ ['default']="autogen"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+ ['rhel-6']="WARNING|"
+ ['centos-9']="NOTREQUIRED|"
+ ['rhel-9']="NOTREQUIRED|"
+)
+
+declare -A pkg_automake=(
+ ['gentoo']="sys-devel/automake"
+ ['clearlinux']="c-basic"
+ ['default']="automake"
+)
+
+# Required to build libwebsockets and libmosquitto on some systems.
+declare -A pkg_cmake=(
+ ['gentoo']="dev-util/cmake"
+ ['clearlinux']="c-basic"
+ ['default']="cmake"
+)
+
+# bison and flex are required by Fluent-Bit
+declare -A pkg_bison=(
+ ['default']="bison"
+)
+
+declare -A pkg_flex=(
+ ['default']="flex"
+)
+
+# fts-dev is required by Fluent-Bit on Alpine
+declare -A pkg_fts_dev=(
+ ['default']="NOTREQUIRED"
+ ['alpine']="musl-fts-dev"
+ ['alpine-3.16.9']="fts-dev"
+)
+
+# cmake3 is required by Fluent-Bit on CentOS 7
+declare -A pkg_cmake3=(
+ ['default']="NOTREQUIRED"
+ ['centos-7']="cmake3"
+)
+
+declare -A pkg_json_c_dev=(
+ ['alpine']="json-c-dev"
+ ['arch']="json-c"
+ ['clearlinux']="devpkg-json-c"
+ ['debian']="libjson-c-dev"
+ ['gentoo']="dev-libs/json-c"
+ ['sabayon']="dev-libs/json-c"
+ ['suse']="libjson-c-devel"
+ ['freebsd']="json-c"
+ ['macos']="json-c"
+ ['default']="json-c-devel"
+)
+
+# TODO: clearlinux?
+declare -A pkg_libyaml_dev=(
+ ['alpine']="yaml-dev"
+ ['arch']="libyaml"
+ ['clearlinux']="yaml-dev"
+ ['debian']="libyaml-dev"
+ ['gentoo']="dev-libs/libyaml"
+ ['sabayon']="dev-libs/libyaml"
+ ['suse']="libyaml-devel"
+ ['freebsd']="libyaml"
+ ['macos']="libyaml"
+ ['default']="libyaml-devel"
+)
+
+declare -A pkg_libatomic=(
+ ['arch']="NOTREQUIRED"
+ ['clearlinux']="NOTREQUIRED"
+ ['debian']="libatomic1"
+ ['freebsd']="NOTREQUIRED"
+ ['gentoo']="NOTREQUIRED"
+ ['macos']="NOTREQUIRED"
+ ['sabayon']="NOTREQUIRED"
+ ['suse']="libatomic1"
+ ['ubuntu']="libatomic1"
+ ['default']="libatomic"
+)
+
+declare -A pkg_libsystemd_dev=(
+ ['alpine']="NOTREQUIRED"
+ ['arch']="NOTREQUIRED" # inherently present on systems actually using systemd
+ ['clearlinux']="system-os-dev"
+ ['debian']="libsystemd-dev"
+ ['freebsd']="NOTREQUIRED"
+ ['gentoo']="NOTREQUIRED" # inherently present on systems actually using systemd
+ ['macos']="NOTREQUIRED"
+ ['sabayon']="NOTREQUIRED" # inherently present on systems actually using systemd
+ ['ubuntu']="libsystemd-dev"
+ ['default']="systemd-devel"
+)
+
+declare -A pkg_bridge_utils=(
+ ['gentoo']="net-misc/bridge-utils"
+ ['clearlinux']="network-basic"
+ ['macos']="WARNING|"
+ ['default']="bridge-utils"
+)
+
+declare -A pkg_curl=(
+ ['gentoo']="net-misc/curl"
+ ['sabayon']="net-misc/curl"
+ ['default']="curl"
+)
+
+declare -A pkg_gzip=(
+ ['gentoo']="app-arch/gzip"
+ ['macos']="NOTREQUIRED"
+ ['default']="gzip"
+)
+
+declare -A pkg_tar=(
+ ['gentoo']="app-arch/tar"
+ ['clearlinux']="os-core-update"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="tar"
+)
+
+declare -A pkg_git=(
+ ['gentoo']="dev-vcs/git"
+ ['default']="git"
+)
+
+declare -A pkg_gcc=(
+ ['gentoo']="sys-devel/gcc"
+ ['clearlinux']="c-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="gcc"
+)
+
+# g++, required for building protobuf
+# The cases where this is not required are systems that implicitly
+# include g++ when installing gcc.
+declare -A pkg_gxx=(
+ ['alpine']="g++"
+ ['arch']="NOTREQUIRED"
+ ['clearlinux']="c-basic"
+ ['debian']="g++"
+ ['gentoo']="NOTREQUIRED"
+ ['macos']="NOTREQUIRED"
+ ['ubuntu']="g++"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="gcc-c++"
+)
+
+declare -A pkg_gdb=(
+ ['gentoo']="sys-devel/gdb"
+ ['macos']="NOTREQUIRED"
+ ['default']="gdb"
+)
+
+declare -A pkg_iotop=(
+ ['gentoo']="sys-process/iotop"
+ ['macos']="WARNING|"
+ ['default']="iotop"
+)
+
+declare -A pkg_iproute2=(
+ ['alpine']="iproute2"
+ ['debian']="iproute2"
+ ['gentoo']="sys-apps/iproute2"
+ ['sabayon']="sys-apps/iproute2"
+ ['clearlinux']="iproute2"
+ ['macos']="WARNING|"
+ ['default']="iproute"
+
+ # exceptions
+ ['ubuntu-12.04']="iproute"
+)
+
+declare -A pkg_ipset=(
+ ['gentoo']="net-firewall/ipset"
+ ['clearlinux']="network-basic"
+ ['macos']="WARNING|"
+ ['default']="ipset"
+)
+
+declare -A pkg_jq=(
+ ['gentoo']="app-misc/jq"
+ ['default']="jq"
+)
+
+declare -A pkg_iptables=(
+ ['gentoo']="net-firewall/iptables"
+ ['macos']="WARNING|"
+ ['default']="iptables"
+)
+
+declare -A pkg_libz_dev=(
+ ['alpine']="zlib-dev"
+ ['arch']="zlib"
+ ['centos']="zlib-devel"
+ ['debian']="zlib1g-dev"
+ ['gentoo']="sys-libs/zlib"
+ ['sabayon']="sys-libs/zlib"
+ ['rhel']="zlib-devel"
+ ['ol']="zlib-devel"
+ ['suse']="zlib-devel"
+ ['clearlinux']="devpkg-zlib"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="lzlib"
+ ['default']=""
+)
+
+declare -A pkg_libuuid_dev=(
+ ['alpine']="util-linux-dev"
+ ['arch']="util-linux"
+ ['centos']="libuuid-devel"
+ ['clearlinux']="devpkg-util-linux"
+ ['debian']="uuid-dev"
+ ['gentoo']="sys-apps/util-linux"
+ ['sabayon']="sys-apps/util-linux"
+ ['rhel']="libuuid-devel"
+ ['ol']="libuuid-devel"
+ ['suse']="libuuid-devel"
+ ['macos']="ossp-uuid"
+ ['freebsd']="e2fsprogs-libuuid"
+ ['default']=""
+)
+
+declare -A pkg_libmnl_dev=(
+ ['alpine']="libmnl-dev"
+ ['arch']="libmnl"
+ ['centos']="libmnl-devel"
+ ['debian']="libmnl-dev"
+ ['gentoo']="net-libs/libmnl"
+ ['sabayon']="net-libs/libmnl"
+ ['rhel']="libmnl-devel"
+ ['ol']="libmnl-devel"
+ ['suse']="libmnl-devel"
+ ['clearlinux']="devpkg-libmnl"
+ ['macos']="NOTREQUIRED"
+ ['default']=""
+)
+
+declare -A pkg_lm_sensors=(
+ ['alpine']="lm_sensors"
+ ['arch']="lm_sensors"
+ ['centos']="lm_sensors"
+ ['debian']="lm-sensors"
+ ['gentoo']="sys-apps/lm-sensors"
+ ['sabayon']="sys-apps/lm_sensors"
+ ['rhel']="lm_sensors"
+ ['suse']="sensors"
+ ['clearlinux']="lm-sensors"
+ ['macos']="WARNING|"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="lm_sensors"
+)
+
+declare -A pkg_logwatch=(
+ ['gentoo']="sys-apps/logwatch"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="logwatch"
+)
+
+declare -A pkg_lxc=(
+ ['gentoo']="app-emulation/lxc"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="lxc"
+)
+
+declare -A pkg_mailutils=(
+ ['gentoo']="net-mail/mailutils"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="mailutils"
+)
+
+declare -A pkg_make=(
+ ['gentoo']="sys-devel/make"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="gmake"
+ ['default']="make"
+)
+
+declare -A pkg_nginx=(
+ ['gentoo']="www-servers/nginx"
+ ['default']="nginx"
+)
+
+declare -A pkg_postfix=(
+ ['gentoo']="mail-mta/postfix"
+ ['macos']="WARNING|"
+ ['default']="postfix"
+)
+
+declare -A pkg_pkg_config=(
+ ['alpine']="pkgconfig"
+ ['arch']="pkgconfig"
+ ['centos']="pkgconfig"
+ ['debian']="pkg-config"
+ ['gentoo']="virtual/pkgconfig"
+ ['sabayon']="virtual/pkgconfig"
+ ['rhel']="pkgconfig"
+ ['ol']="pkgconfig"
+ ['suse']="pkg-config"
+ ['freebsd']="pkgconf"
+ ['clearlinux']="c-basic"
+ ['default']="pkg-config"
+)
+
+declare -A pkg_python=(
+ ['gentoo']="dev-lang/python"
+ ['sabayon']="dev-lang/python:2.7"
+ ['clearlinux']="python-basic"
+ ['default']="python"
+
+ # Exceptions
+ ['macos']="WARNING|"
+ ['centos-8']="python2"
+)
+
+declare -A pkg_python_pip=(
+ ['alpine']="py-pip"
+ ['gentoo']="dev-python/pip"
+ ['sabayon']="dev-python/pip"
+ ['clearlinux']="python-basic"
+ ['macos']="WARNING|"
+ ['default']="python-pip"
+)
+
+declare -A pkg_python3_pip=(
+ ['alpine']="py3-pip"
+ ['arch']="python-pip"
+ ['gentoo']="dev-python/pip"
+ ['sabayon']="dev-python/pip"
+ ['clearlinux']="python3-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="python3-pip"
+)
+
+declare -A pkg_python_requests=(
+ ['alpine']="py-requests"
+ ['arch']="python2-requests"
+ ['centos']="python-requests"
+ ['debian']="python-requests"
+ ['gentoo']="dev-python/requests"
+ ['sabayon']="dev-python/requests"
+ ['rhel']="python-requests"
+ ['suse']="python-requests"
+ ['clearlinux']="python-extras"
+ ['macos']="WARNING|"
+ ['default']="python-requests"
+ ['alpine-3.1.4']="WARNING|"
+ ['alpine-3.2.3']="WARNING|"
+)
+
+declare -A pkg_python3_requests=(
+ ['alpine']="py3-requests"
+ ['arch']="python-requests"
+ ['centos']="WARNING|"
+ ['debian']="WARNING|"
+ ['gentoo']="dev-python/requests"
+ ['sabayon']="dev-python/requests"
+ ['rhel']="WARNING|"
+ ['suse']="WARNING|"
+ ['clearlinux']="python-extras"
+ ['macos']="WARNING|"
+ ['default']="WARNING|"
+
+ ['centos-7']="python36-requests"
+ ['centos-8']="python3-requests"
+ ['rhel-7']="python36-requests"
+ ['rhel-8']="python3-requests"
+ ['ol-8']="python3-requests"
+)
+
+declare -A pkg_lz4=(
+ ['alpine']="lz4-dev"
+ ['debian']="liblz4-dev"
+ ['ubuntu']="liblz4-dev"
+ ['suse']="liblz4-devel"
+ ['gentoo']="app-arch/lz4"
+ ['clearlinux']="devpkg-lz4"
+ ['arch']="lz4"
+ ['macos']="lz4"
+ ['freebsd']="liblz4"
+ ['default']="lz4-devel"
+)
+
+declare -A pkg_zstd=(
+ ['alpine']="zstd-dev"
+ ['debian']="libzstd-dev"
+ ['ubuntu']="libzstd-dev"
+ ['gentoo']="app-arch/zstd"
+ ['clearlinux']="zstd-devel"
+ ['arch']="zstd"
+ ['macos']="zstd"
+ ['freebsd']="zstd"
+ ['default']="libzstd-devel"
+)
+
+declare -A pkg_libuv=(
+ ['alpine']="libuv-dev"
+ ['debian']="libuv1-dev"
+ ['ubuntu']="libuv1-dev"
+ ['gentoo']="dev-libs/libuv"
+ ['arch']="libuv"
+ ['clearlinux']="devpkg-libuv"
+ ['macos']="libuv"
+ ['freebsd']="libuv"
+ ['default']="libuv-devel"
+)
+
+declare -A pkg_openssl=(
+ ['alpine']="openssl-dev"
+ ['debian']="libssl-dev"
+ ['ubuntu']="libssl-dev"
+ ['suse']="libopenssl-devel"
+ ['clearlinux']="devpkg-openssl"
+ ['gentoo']="dev-libs/openssl"
+ ['arch']="openssl"
+ ['freebsd']="openssl"
+ ['macos']="openssl"
+ ['default']="openssl-devel"
+)
+
+declare -A pkg_python3=(
+ ['gentoo']="dev-lang/python"
+ ['sabayon']="dev-lang/python:3.4"
+ ['clearlinux']="python3-basic"
+ ['macos']="python"
+ ['default']="python3"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+)
+
+declare -A pkg_screen=(
+ ['gentoo']="app-misc/screen"
+ ['sabayon']="app-misc/screen"
+ ['clearlinux']="sysadmin-basic"
+ ['default']="screen"
+)
+
+declare -A pkg_sudo=(
+ ['gentoo']="app-admin/sudo"
+ ['macos']="NOTREQUIRED"
+ ['default']="sudo"
+)
+
+declare -A pkg_sysstat=(
+ ['gentoo']="app-admin/sysstat"
+ ['macos']="WARNING|"
+ ['default']="sysstat"
+)
+
+declare -A pkg_tcpdump=(
+ ['gentoo']="net-analyzer/tcpdump"
+ ['clearlinux']="network-basic"
+ ['default']="tcpdump"
+)
+
+declare -A pkg_traceroute=(
+ ['alpine']=" "
+ ['gentoo']="net-analyzer/traceroute"
+ ['clearlinux']="network-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="traceroute"
+)
+
+declare -A pkg_valgrind=(
+ ['gentoo']="dev-util/valgrind"
+ ['default']="valgrind"
+)
+
+declare -A pkg_ulogd=(
+ ['centos']="WARNING|"
+ ['rhel']="WARNING|"
+ ['ol']="WARNING|"
+ ['clearlinux']="WARNING|"
+ ['gentoo']="app-admin/ulogd"
+ ['arch']="ulogd"
+ ['macos']="WARNING|"
+ ['default']="ulogd2"
+)
+
+declare -A pkg_unzip=(
+ ['gentoo']="app-arch/unzip"
+ ['macos']="NOTREQUIRED"
+ ['default']="unzip"
+)
+
+declare -A pkg_zip=(
+ ['gentoo']="app-arch/zip"
+ ['macos']="NOTREQUIRED"
+ ['default']="zip"
+)
+
+declare -A pkg_libelf=(
+ ['alpine']="elfutils-dev"
+ ['arch']="libelf"
+ ['gentoo']="virtual/libelf"
+ ['sabayon']="virtual/libelf"
+ ['debian']="libelf-dev"
+ ['ubuntu']="libelf-dev"
+ ['fedora']="elfutils-libelf-devel"
+ ['centos']="elfutils-libelf-devel"
+ ['rhel']="elfutils-libelf-devel"
+ ['ol']="elfutils-libelf-devel"
+ ['clearlinux']="devpkg-elfutils"
+ ['suse']="libelf-devel"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="libelf-devel"
+
+ # exceptions
+ ['alpine-3.5']="libelf-dev"
+ ['alpine-3.4']="libelf-dev"
+ ['alpine-3.3']="libelf-dev"
+)
+
+validate_package_trees() {
+ if type -t validate_tree_${tree} > /dev/null; then
+ validate_tree_${tree}
+ fi
+}
+
+validate_installed_package() {
+ validate_${package_installer} "${p}"
+}
+
+suitable_package() {
+ local package="${1//-/_}" p="" v="${version//.*/}"
+
+ echo >&2 "Searching for ${package} ..."
+
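+ # Lookup order, most to least specific; e.g. on Ubuntu 20.04 (tree 'debian') the keys
+ # tried are: ubuntu-20.04, ubuntu-20, ubuntu, debian-20.04, debian-20, debian, default.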
+ eval "p=\${pkg_${package}['${distribution,,}-${version,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}-${v,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${version}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${v}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['default']}"
+
+ if [[ "${p/|*/}" =~ ^(ERROR|WARNING|INFO)$ ]]; then
+ echo >&2 "${p/|*/}"
+ echo >&2 "package ${1} is not available in this system."
+ if [ -z "${p/*|/}" ]; then
+ echo >&2 "You may try to install without it."
+ else
+ echo >&2 "${p/*|/}"
+ fi
+ echo >&2
+ return 1
+ elif [ "${p}" = "NOTREQUIRED" ]; then
+ return 0
+ elif [ -z "${p}" ]; then
+ echo >&2 "WARNING"
+ echo >&2 "package ${1} is not available in this system."
+ echo >&2
+ return 1
+ else
+ if [ "${IGNORE_INSTALLED}" -eq 0 ]; then
+ validate_installed_package "${p}"
+ else
+ echo "${p}"
+ fi
+ return 0
+ fi
+}
+
+packages() {
+ # detect the packages we need to install on this system
+
+ # -------------------------------------------------------------------------
+ # basic build environment
+
+ suitable_package distro-sdk
+ suitable_package libatomic
+
+ require_cmd git || suitable_package git
+ require_cmd find || suitable_package find
+
+ require_cmd gcc || require_cmd clang ||
+ require_cmd gcc-multilib || suitable_package gcc
+ require_cmd g++ || require_cmd clang++ || suitable_package gxx
+
+ require_cmd make || suitable_package make
+ require_cmd autoconf || suitable_package autoconf
+ suitable_package autoconf-archive
+ require_cmd autogen || suitable_package autogen
+ require_cmd automake || suitable_package automake
+ require_cmd pkg-config || suitable_package pkg-config
+ require_cmd cmake || suitable_package cmake
+ require_cmd cmake3 || suitable_package cmake3
+
+ # -------------------------------------------------------------------------
+ # debugging tools for development
+
+ if [ "${PACKAGES_DEBUG}" -ne 0 ]; then
+ require_cmd traceroute || suitable_package traceroute
+ require_cmd tcpdump || suitable_package tcpdump
+ require_cmd screen || suitable_package screen
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ require_cmd gdb || suitable_package gdb
+ require_cmd valgrind || suitable_package valgrind
+ fi
+ fi
+
+ # -------------------------------------------------------------------------
+ # common command line tools
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ require_cmd tar || suitable_package tar
+ require_cmd curl || suitable_package curl
+ require_cmd gzip || suitable_package gzip
+ require_cmd bison || suitable_package bison
+ require_cmd flex || suitable_package flex
+ fi
+
+ # -------------------------------------------------------------------------
+ # firehol/fireqos/update-ipsets command line tools
+
+ if [ "${PACKAGES_FIREQOS}" -ne 0 ]; then
+ require_cmd ip || suitable_package iproute2
+ fi
+
+ if [ "${PACKAGES_FIREHOL}" -ne 0 ]; then
+ require_cmd iptables || suitable_package iptables
+ require_cmd ipset || suitable_package ipset
+ require_cmd ulogd ulogd2 || suitable_package ulogd
+ require_cmd traceroute || suitable_package traceroute
+ require_cmd bridge || suitable_package bridge-utils
+ fi
+
+ if [ "${PACKAGES_UPDATE_IPSETS}" -ne 0 ]; then
+ require_cmd ipset || suitable_package ipset
+ require_cmd zip || suitable_package zip
+ require_cmd funzip || suitable_package unzip
+ fi
+
+ # -------------------------------------------------------------------------
+ # netdata libraries
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ suitable_package libz-dev
+ suitable_package libuuid-dev
+ suitable_package libmnl-dev
+ suitable_package json-c-dev
+ suitable_package fts-dev
+ suitable_package libyaml-dev
+ suitable_package libsystemd-dev
+ fi
+
+ # -------------------------------------------------------------------------
+ # sensors
+
+ if [ "${PACKAGES_NETDATA_SENSORS}" -ne 0 ]; then
+ require_cmd sensors || suitable_package lm_sensors
+ fi
+
+ # -------------------------------------------------------------------------
+ # netdata database
+ if [ "${PACKAGES_NETDATA_DATABASE}" -ne 0 ]; then
+ suitable_package libuv
+ suitable_package lz4
+ suitable_package openssl
+ fi
+
+ if [ "${PACKAGES_NETDATA_STREAMING_COMPRESSION}" -ne 0 ]; then
+ suitable_package zstd
+ fi
+
+ # -------------------------------------------------------------------------
+ # ebpf plugin
+ if [ "${PACKAGES_NETDATA_EBPF}" -ne 0 ]; then
+ suitable_package libelf
+ fi
+
+ # -------------------------------------------------------------------------
+ # python2
+
+ if [ "${PACKAGES_NETDATA_PYTHON}" -ne 0 ]; then
+ require_cmd python || suitable_package python
+
+ # suitable_package python-requests
+ # suitable_package python-pip
+ fi
+
+ # -------------------------------------------------------------------------
+ # python3
+
+ if [ "${PACKAGES_NETDATA_PYTHON3}" -ne 0 ]; then
+ require_cmd python3 || suitable_package python3
+
+ # suitable_package python3-requests
+ # suitable_package python3-pip
+ fi
+
+ # -------------------------------------------------------------------------
+ # applications needed for the netdata demo sites
+
+ if [ "${PACKAGES_NETDATA_DEMO_SITE}" -ne 0 ]; then
+ require_cmd sudo || suitable_package sudo
+ require_cmd jq || suitable_package jq
+ require_cmd nginx || suitable_package nginx
+ require_cmd postconf || suitable_package postfix
+ require_cmd lxc-create || suitable_package lxc
+ require_cmd logwatch || suitable_package logwatch
+ require_cmd mail || suitable_package mailutils
+ require_cmd iostat || suitable_package sysstat
+ require_cmd iotop || suitable_package iotop
+ fi
+}
+
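+# run() prints the command it is about to execute; with DRYRUN=1 it only prints.
+# DRYRUN is flipped on further below to preview the final install command.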
+DRYRUN=0
+run() {
+
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+
+ if [ ! "${DRYRUN}" -eq 1 ]; then
+ "${@}"
+ return $?
+ fi
+ return 0
+}
+
+sudo=
+if [ ${UID} -ne 0 ]; then
+ sudo="sudo"
+fi
+
+# -----------------------------------------------------------------------------
+# debian / ubuntu
+
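+# Each validate_install_* helper echoes the package name only when it is NOT already
+# installed; packages() relies on this to build the list of missing packages.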
+validate_install_apt_get() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ [ "$(dpkg-query -W --showformat='${Status}\n' "${*}")" = "install ok installed" ] || echo "${*}"
+}
+
+install_apt_get() {
+ local opts=""
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://serverfault.com/questions/227190/how-do-i-ask-apt-get-to-skip-any-interactive-post-install-configuration-steps
+ export DEBIAN_FRONTEND="noninteractive"
+ opts="${opts} -yq"
+ fi
+
+ read -r -a apt_opts <<< "$opts"
+
+ # update apt repository caches
+
+ echo >&2 "NOTE: Running apt-get update and updating your APT caches ..."
+ if [ "${version}" = 8 ]; then
+ echo >&2 "WARNING: You seem to be on Debian 8 (jessie) which is old enough we have to disable Check-Valid-Until checks"
+ if ! cat /etc/apt/sources.list /etc/apt/sources.list.d/* 2> /dev/null | grep -q jessie-backports; then
+ echo >&2 "We also have to enable the jessie-backports repository"
+ if prompt "Is this okay?"; then
+ ${sudo} /bin/sh -c 'echo "deb http://archive.debian.org/debian/ jessie-backports main contrib non-free" >> /etc/apt/sources.list.d/99-archived.list'
+ fi
+ fi
+ run ${sudo} apt-get "${apt_opts[@]}" -o Acquire::Check-Valid-Until=false update
+ else
+ run ${sudo} apt-get "${apt_opts[@]}" update
+ fi
+
+ # install the required packages
+ run ${sudo} apt-get "${apt_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# centos / rhel
+
+prompt() {
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+ echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+validate_tree_freebsd() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > FreeBSD Version: ${version} ..."
+
+ make="make"
+ echo >&2 " > Checking for gmake ..."
+ if ! pkg query %n-%v | grep -q gmake; then
+ if prompt "gmake is required to build on FreeBSD and is not installed. Shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} pkg install ${opts} gmake
+ fi
+ fi
+}
+
+validate_tree_ol() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ if [[ "${version}" =~ ^8(\..*)?$ ]]; then
+ echo " > Checking for CodeReady Builder ..."
+ if ! run ${sudo} dnf repolist | grep -q codeready; then
+ if prompt "CodeReady Builder not found, shall I install it?"; then
+ cat > /etc/yum.repos.d/ol8_codeready.repo <<-EOF
+ [ol8_codeready_builder]
+ name=Oracle Linux \$releasever CodeReady Builder (\$basearch)
+ baseurl=http://yum.oracle.com/repo/OracleLinux/OL8/codeready/builder/\$basearch
+ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
+ gpgcheck=1
+ enabled=1
+ EOF
+ fi
+ fi
+ elif [[ "${version}" =~ ^9(\..*)?$ ]]; then
+ echo " > Checking for CodeReady Builder ..."
+ if ! run ${sudo} dnf repolist enabled | grep -q codeready; then
+ if prompt "CodeReady Builder not enabled, shall I enable it?"; then
+ run ${sudo} dnf config-manager --set-enabled ol9_codeready_builder
+ fi
+ fi
+ fi
+}
+
+validate_tree_centos() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > CentOS Version: ${version} ..."
+
+ if [[ "${version}" =~ ^9(\..*)?$ ]]; then
+ echo >&2 " > Checking for config-manager ..."
+ if ! run ${sudo} dnf config-manager --help; then
+ if prompt "config-manager not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} dnf ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for CRB ..."
+ # shellcheck disable=2086
+ if ! run ${sudo} dnf repolist | grep CRB; then
+ if prompt "CRB not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} dnf ${opts} config-manager --set-enabled crb
+ fi
+ fi
+ elif [[ "${version}" =~ ^8(\..*)?$ ]]; then
+ echo >&2 " > Checking for config-manager ..."
+ if ! run ${sudo} yum config-manager --help; then
+ if prompt "config-manager not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} yum ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for PowerTools ..."
+ # shellcheck disable=2086
+ if ! run ${sudo} yum repolist | grep PowerTools; then
+ if prompt "PowerTools not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} yum ${opts} config-manager --set-enabled powertools
+ fi
+ fi
+
+ echo >&2 " > Updating libarchive ..."
+ # shellcheck disable=2086
+ run ${sudo} yum ${opts} install libarchive
+
+ elif [[ "${version}" =~ ^7(\..*)?$ ]]; then
+ echo >&2 " > Checking for EPEL ..."
+ if ! rpm -qa | grep epel-release > /dev/null; then
+ if prompt "EPEL not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} yum ${opts} install epel-release
+ fi
+ fi
+ elif [[ "${version}" =~ ^6\..*$ ]]; then
+ echo >&2 " > Detected CentOS 6.x ..."
+ echo >&2 " > Checking for Okay ..."
+ if ! rpm -qa | grep okay > /dev/null; then
+ if prompt "okay not found, shall I install it?"; then
+ # shellcheck disable=2086
+ run ${sudo} yum ${opts} install http://repo.okay.com.mx/centos/6/x86_64/release/okay-release-1-3.el6.noarch.rpm
+ fi
+ fi
+
+ fi
+}
+
+validate_install_yum() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ yum list installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_yum() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} yum update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/87822/does-yum-have-an-equivalent-to-apt-aptitudes-debian-frontend-noninteractive
+ opts="-y"
+ fi
+
+ read -r -a yum_opts <<< "${opts}"
+
+ # install the required packages
+ run ${sudo} yum "${yum_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# fedora
+
+validate_install_dnf() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ dnf list installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_dnf() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} dnf update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # man dnf
+ opts="-y"
+ fi
+
+ # install the required packages
+ # --setopt=strict=0 allows dnf to proceed
+ # installing whatever is available
+ # even if a package is not found
+ opts="$opts --setopt=strict=0"
+ read -r -a dnf_opts <<< "$opts"
+ run ${sudo} dnf "${dnf_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# gentoo
+
+validate_install_emerge() {
+ echo "${*}"
+}
+
+install_emerge() {
+ # download the latest package info
+ # we don't do this for emerge - it is very slow
+ # and most users are expected to do this daily
+ # emerge --sync
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} emerge --sync or ${sudo} eix-sync "
+ echo >&2
+ fi
+
+ local opts="--ask"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts=""
+ fi
+
+ read -r -a emerge_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} emerge "${emerge_opts[@]}" -v --noreplace "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# alpine
+
+validate_install_apk() {
+ echo "${*}"
+}
+
+install_apk() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} apk update "
+ echo >&2
+ fi
+
+ local opts="--force-broken-world"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ else
+ opts="${opts} -i"
+ fi
+
+ read -r -a apk_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} apk add "${apk_opts[@]}" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# sabayon
+
+validate_install_equo() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ equo s --installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_equo() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} equo up "
+ echo >&2
+ fi
+
+ local opts="-av"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-v"
+ fi
+
+ read -r -a equo_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} equo i "${equo_opts[@]}" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# arch
+
+PACMAN_DB_SYNCED=0
+validate_install_pacman() {
+
+ if [ "${PACMAN_DB_SYNCED}" -eq 0 ]; then
+ echo >&2 " > Running pacman -Sy to sync the database"
+ local x
+ x=$(pacman -Sy)
+ [ -z "${x}" ] && echo "${*}"
+ PACMAN_DB_SYNCED=1
+ fi
+ echo >&2 " > Checking if package '${*}' is installed..."
+
+ # In pacman, alternative flags could be used to match package names exactly,
+ # but it is highly likely we need pattern matching too, so we keep -Qs and match
+ # the exceptional cases like so:
+ local x=""
+ case "${package}" in
+ "gcc")
+ # Temporary workaround: In archlinux, default installation includes runtime libs under package "gcc"
+ # These are not sufficient for netdata install, so we need to make sure that the appropriate libraries are there
+ # by ensuring devel libs are available
+ x=$(pacman -Qs "${*}" | grep "base-devel")
+ ;;
+ "tar")
+ x=$(pacman -Qs "${*}" | grep "local/tar")
+ ;;
+ "make")
+ x=$(pacman -Qs "${*}" | grep "local/make ")
+ ;;
+ *)
+ x=$(pacman -Qs "${*}")
+ ;;
+ esac
+
+ [ -z "${x}" ] && echo "${*}"
+}
+
+install_pacman() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} pacman -Syu "
+ echo >&2
+ fi
+
+ # install the required packages
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/52277/pacman-option-to-assume-yes-to-every-question/52278
+ # Try the --noconfirm option; if that fails, fall back to the legacy way for non-interactive mode
+ run ${sudo} pacman --noconfirm --needed -S "${@}" || yes | run ${sudo} pacman --needed -S "${@}"
+ else
+ run ${sudo} pacman --needed -S "${@}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# suse / opensuse
+
+validate_install_zypper() {
+ rpm -q "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_zypper() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} zypper update "
+ echo >&2
+ fi
+
+ local opts="--ignore-unknown"
+ local install_opts="--allow-downgrade"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/82016/how-to-use-zypper-in-bash-scripts-for-someone-coming-from-apt-get
+ opts="${opts} --non-interactive"
+ fi
+
+ read -r -a zypper_opts <<< "$opts"
+ # install the required packages
+ run ${sudo} zypper "${zypper_opts[@]}" install "${install_opts}" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# clearlinux
+
+validate_install_swupd() {
+ swupd bundle-list | grep -q "${*}" || echo "${*}"
+}
+
+install_swupd() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} swupd update "
+ echo >&2
+ fi
+
+ run ${sudo} swupd bundle-add "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# freebsd / macOS
+
+validate_install_pkg() {
+ pkg query %n-%v | grep -q "${*}" || echo "${*}"
+}
+
+validate_install_brew() {
+ brew list | grep -q "${*}" || echo "${*}"
+}
+
+install_pkg() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: pkg update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ read -r -a pkg_opts <<< "${opts}"
+
+ run ${sudo} pkg install "${pkg_opts[@]}" "${@}"
+}
+
+install_brew() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: brew upgrade "
+ echo >&2
+ fi
+
+ run brew install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+install_failed() {
+ local ret="${1}"
+ cat << EOF
+
+
+
+We are very sorry!
+
+Installation of required packages failed.
+
+What to do now:
+
+ 1. Make sure your system is updated.
+ Most of the time, updating your system will resolve the issue.
+
+ 2. If the error message is about a specific package, try removing
+ that package from the command and run it again.
+ Depending on the broken package, you may be able to continue.
+
+ 3. Let us know. We may be able to help.
+ Open a github issue with the above log, at:
+
+ https://github.com/netdata/netdata/issues
+
+
+EOF
+ remote_log "FAILED" "${ret}"
+ exit 1
+}
+
+remote_log() {
+ # log success or failure on our system
+ # to help us solve installation issues
+ curl > /dev/null 2>&1 -Ss --max-time 3 "https://registry.my-netdata.io/log/installer?status=${1}&error=${2}&distribution=${distribution}&version=${version}&installer=${package_installer}&tree=${tree}&detection=${detection}&netdata=${PACKAGES_NETDATA}&python=${PACKAGES_NETDATA_PYTHON}&python3=${PACKAGES_NETDATA_PYTHON3}&sensors=${PACKAGES_NETDATA_SENSORS}&database=${PACKAGES_NETDATA_DATABASE}&ebpf=${PACKAGES_NETDATA_EBPF}&firehol=${PACKAGES_FIREHOL}&fireqos=${PACKAGES_FIREQOS}&iprange=${PACKAGES_IPRANGE}&update_ipsets=${PACKAGES_UPDATE_IPSETS}&demo=${PACKAGES_NETDATA_DEMO_SITE}"
+}
+
+if [ -z "${1}" ]; then
+ usage
+ exit 1
+fi
+
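+# Determine the default Python major version (pv) for this system; it decides whether
+# the 'netdata-all' and 'demo'/'all' package groups pull in the python2 or python3 set.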
+pv=$(python --version 2>&1)
+if [ "${tree}" = macos ]; then
+ pv=3
+elif [[ "${pv}" =~ ^Python\ 2.* ]]; then
+ pv=2
+elif [[ "${pv}" =~ ^Python\ 3.* ]]; then
+ pv=3
+elif [[ "${tree}" == "centos" ]] && [ "${version}" -lt 8 ]; then
+ pv=2
+else
+ pv=3
+fi
+
+# parse command line arguments
+DONT_WAIT=0
+NON_INTERACTIVE=0
+IGNORE_INSTALLED=0
+while [ -n "${1}" ]; do
+ case "${1}" in
+ distribution)
+ distribution="${2}"
+ shift
+ ;;
+
+ version)
+ version="${2}"
+ shift
+ ;;
+
+ codename)
+ codename="${2}"
+ shift
+ ;;
+
+ installer)
+ check_package_manager "${2}" || exit 1
+ shift
+ ;;
+
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ ignore-installed | --ignore-installed | -i)
+ IGNORE_INSTALLED=1
+ ;;
+
+ netdata-all)
+ PACKAGES_NETDATA=1
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ fi
+ PACKAGES_NETDATA_SENSORS=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ PACKAGES_NETDATA_STREAMING_COMPRESSION=1
+ ;;
+
+ netdata)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ PACKAGES_NETDATA_STREAMING_COMPRESSION=1
+ ;;
+
+ python | netdata-python)
+ PACKAGES_NETDATA_PYTHON=1
+ ;;
+
+ python3 | netdata-python3)
+ PACKAGES_NETDATA_PYTHON3=1
+ ;;
+
+ sensors | netdata-sensors)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_SENSORS=1
+ PACKAGES_NETDATA_DATABASE=1
+ ;;
+
+ firehol | update-ipsets | firehol-all | fireqos)
+ PACKAGES_IPRANGE=1
+ PACKAGES_FIREHOL=1
+ PACKAGES_FIREQOS=1
+ PACKAGES_IPRANGE=1
+ PACKAGES_UPDATE_IPSETS=1
+ ;;
+
+ demo | all)
+ PACKAGES_NETDATA=1
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ fi
+ PACKAGES_DEBUG=1
+ PACKAGES_IPRANGE=1
+ PACKAGES_FIREHOL=1
+ PACKAGES_FIREQOS=1
+ PACKAGES_UPDATE_IPSETS=1
+ PACKAGES_NETDATA_DEMO_SITE=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+# Check for missing core commands like grep, warn the user to install it and bail out cleanly
+if ! command -v grep > /dev/null 2>&1; then
+ echo >&2
+ echo >&2 "ERROR: 'grep' is required for the install to run correctly and was not found on the system."
+ echo >&2 "Please install grep and run the installer again."
+ echo >&2
+ exit 1
+fi
+
+if [ -z "${package_installer}" ] || [ -z "${tree}" ]; then
+ if [ -z "${distribution}" ]; then
+    # we don't know the distribution
+ autodetect_distribution || user_picks_distribution
+ fi
+
+  # If no package installer was detected, try to detect it from the distribution info (if any)
+ if [ -z "${package_installer}" ]; then
+ detect_package_manager_from_distribution "${distribution}"
+ fi
+
+ # Validate package manager trees
+ validate_package_trees
+fi
+
+[ "${detection}" = "/etc/os-release" ] && cat << EOF
+
+/etc/os-release information:
+NAME : ${NAME}
+VERSION : ${VERSION}
+ID : ${ID}
+ID_LIKE : ${ID_LIKE}
+VERSION_ID : ${VERSION_ID}
+EOF
+
+cat << EOF
+
+We detected these:
+Distribution : ${distribution}
+Version : ${version}
+Codename : ${codename}
+Package Manager : ${package_installer}
+Packages Tree : ${tree}
+Detection Method: ${detection}
+Default Python v: ${pv} $([ ${pv} -eq 2 ] && [ "${PACKAGES_NETDATA_PYTHON3}" -eq 1 ] && echo "(will install python3 too)")
+
+EOF
+
+mapfile -t PACKAGES_TO_INSTALL < <(packages | sort -u)
+
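+# Preview the exact package manager command via a dry-run pass, optionally wait for the
+# user to confirm, then run it for real.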
+if [ ${#PACKAGES_TO_INSTALL[@]} -gt 0 ]; then
+ echo >&2
+ echo >&2 "The following command will be run:"
+ echo >&2
+ DRYRUN=1
+ "${package_installer}" "${PACKAGES_TO_INSTALL[@]}"
+ DRYRUN=0
+ echo >&2
+ echo >&2
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+
+ "${package_installer}" "${PACKAGES_TO_INSTALL[@]}" || install_failed $?
+
+ echo >&2
+  echo >&2 "All done! Now proceed to the next step."
+ echo >&2
+
+else
+ echo >&2
+ echo >&2 "All required packages are already installed. Now proceed to the next step."
+ echo >&2
+fi
+
+remote_log "OK"
+
+exit 0
diff --git a/packaging/installer/kickstart-ng.sh b/packaging/installer/kickstart-ng.sh
new file mode 120000
index 00000000..71d605e7
--- /dev/null
+++ b/packaging/installer/kickstart-ng.sh
@@ -0,0 +1 @@
+kickstart.sh \ No newline at end of file
diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh
new file mode 120000
index 00000000..71d605e7
--- /dev/null
+++ b/packaging/installer/kickstart-static64.sh
@@ -0,0 +1 @@
+kickstart.sh \ No newline at end of file
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
new file mode 100755
index 00000000..f7c07827
--- /dev/null
+++ b/packaging/installer/kickstart.sh
@@ -0,0 +1,2332 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Next unused error code: F0516
+
+# ======================================================================
+# Constants
+
+DEFAULT_RELEASE_CHANNEL="nightly"
+KICKSTART_OPTIONS="${*}"
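+# Resolve the canonical path of this script, following any symlinks, so that later steps
+# can reference the actual file on disk.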
+KICKSTART_SOURCE="$(
+ self=${0}
+ while [ -L "${self}" ]
+ do
+ cd "${self%/*}" || exit 1
+ self=$(readlink "${self}")
+ done
+ cd "${self%/*}" || exit 1
+ echo "$(pwd -P)/${self##*/}"
+)"
+DEFAULT_PLUGIN_PACKAGES=""
+PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
+REPOCONFIG_DEB_VERSION="2-1"
+REPOCONFIG_RPM_VERSION="2-1"
+START_TIME="$(date +%s)"
+STATIC_INSTALL_ARCHES="x86_64 armv7l aarch64 ppc64le"
+
+# ======================================================================
+# URLs used throughout the script
+
+AGENT_BUG_REPORT_URL="https://github.com/netdata/netdata/issues/new/choose"
+CLOUD_BUG_REPORT_URL="https://github.com/netdata/netdata-cloud/issues/new/choose"
+DISCORD_INVITE="https://discord.gg/5ygS846fR6"
+DISCUSSIONS_URL="https://github.com/netdata/netdata/discussions"
+DOCS_URL="https://learn.netdata.cloud/docs/"
+FORUM_URL="https://community.netdata.cloud/"
+INSTALL_DOC_URL="https://learn.netdata.cloud/docs/install-the-netdata-agent/one-line-installer-for-all-linux-systems"
+PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"
+PUBLIC_CLOUD_URL="https://app.netdata.cloud"
+RELEASE_INFO_URL="https://repo.netdata.cloud/releases"
+REPOCONFIG_DEB_URL_PREFIX="https://repo.netdata.cloud/repos/repoconfig"
+REPOCONFIG_RPM_URL_PREFIX="https://repo.netdata.cloud/repos/repoconfig"
+TELEMETRY_URL="https://us-east1-netdata-analytics-bi.cloudfunctions.net/ingest_agent_events"
+
+# ======================================================================
+# Defaults for environment variables
+
+DRY_RUN=0
+SELECTED_INSTALL_METHOD="none"
+INSTALL_TYPE="unknown"
+INSTALL_PREFIX=""
+NETDATA_AUTO_UPDATES="default"
+NETDATA_CLAIM_URL="https://app.netdata.cloud"
+NETDATA_COMMAND="default"
+NETDATA_DISABLE_CLOUD=0
+NETDATA_INSTALLER_OPTIONS=""
+NETDATA_FORCE_METHOD=""
+NETDATA_OFFLINE_INSTALL_SOURCE=""
+NETDATA_REQUIRE_CLOUD=1
+NETDATA_WARNINGS=""
+RELEASE_CHANNEL="default"
+
+if [ -n "$DISABLE_TELEMETRY" ]; then
+ NETDATA_DISABLE_TELEMETRY="${DISABLE_TELEMETRY}"
+elif [ -n "$DO_NOT_TRACK" ]; then
+ NETDATA_DISABLE_TELEMETRY="${DO_NOT_TRACK}"
+else
+ NETDATA_DISABLE_TELEMETRY=0
+fi
+
+NETDATA_TARBALL_BASEURL="${NETDATA_TARBALL_BASEURL:-https://github.com/netdata/netdata-nightlies/releases}"
+
+if echo "${0}" | grep -q 'kickstart-static64'; then
+ NETDATA_FORCE_METHOD='static'
+fi
+
+if [ ! -t 1 ]; then
+ INTERACTIVE=0
+else
+ INTERACTIVE=1
+fi
+
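+# Locate a curl binary, also checking /opt/netdata/bin, where a static install may provide one.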
+CURL="$(PATH="${PATH}:/opt/netdata/bin" command -v curl 2>/dev/null && true)"
+
+# ======================================================================
+# Shared messages used in multiple places throughout the script.
+
+BADCACHE_MSG="Usually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour"
+BADNET_MSG="This is usually a result of a networking issue"
+ERROR_F0003="Could not find a usable HTTP client. Either curl or wget is required to proceed with installation."
+BADOPT_MSG="If you are following a third-party guide online, please see ${INSTALL_DOC_URL} for current instructions for using this script. If you are using a local copy of this script instead of fetching it from our servers, consider updating it. If you intended to pass this option to the installer code, please use either --local-build-options or --static-install-options to specify it instead."
+
+# ======================================================================
+# Core program logic
+
+main() {
+ case "${ACTION}" in
+ uninstall)
+ uninstall
+ printf >&2 "Finished uninstalling the Netdata Agent."
+ deferred_warnings
+ cleanup
+ trap - EXIT
+ exit 0
+ ;;
+ reinstall-clean)
+ NEW_INSTALL_PREFIX="${INSTALL_PREFIX}"
+ uninstall
+ cleanup
+
+ ACTION=''
+ INSTALL_PREFIX="${NEW_INSTALL_PREFIX}"
+ # shellcheck disable=SC2086
+ main
+
+ trap - EXIT
+ exit 0
+ ;;
+ prepare-offline)
+ prepare_offline_install_source "${OFFLINE_TARGET}"
+ deferred_warnings
+ trap - EXIT
+ exit 0
+ ;;
+ esac
+
+ set_tmpdir
+
+ if [ -n "${INSTALL_VERSION}" ]; then
+ if echo "${INSTALL_VERSION}" | grep -E -o "^[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+$" > /dev/null 2>&1; then
+ NEW_SELECTED_RELEASE_CHANNEL="stable"
+ else
+ NEW_SELECTED_RELEASE_CHANNEL="nightly"
+ fi
+
+ if ! [ "${NEW_SELECTED_RELEASE_CHANNEL}" = "${SELECTED_RELEASE_CHANNEL}" ]; then
+      warning "The selected release channel does not match the specified version and will be changed automatically."
+ SELECTED_RELEASE_CHANNEL="${NEW_SELECTED_RELEASE_CHANNEL}"
+ fi
+ fi
+
+ case "${SYSTYPE}" in
+ Linux) install_on_linux ;;
+ Darwin) install_on_macos ;;
+ FreeBSD) install_on_freebsd ;;
+ esac
+
+ if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+ claim
+ elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then
+ soft_disable_cloud
+ fi
+
+ set_auto_updates
+
+ printf >&2 "%s\n\n" "Successfully installed the Netdata Agent."
+ deferred_warnings
+ success_banner
+ telemetry_event INSTALL_SUCCESS "" ""
+ cleanup
+ trap - EXIT
+}
+
+# ======================================================================
+# Usage info
+
+usage() {
+ cat << HEREDOC
+USAGE: kickstart.sh [options]
+ where options include:
+
+ --non-interactive Do not prompt for user input. (default: prompt if there is a controlling terminal)
+ --interactive Prompt for user input even if there is no controlling terminal.
+ --dont-start-it Do not start the agent by default (only for static installs or local builds)
+ --dry-run Report what we would do with the given options on this system, but don’t actually do anything.
+ --release-channel Specify the release channel to use for the install (default: ${DEFAULT_RELEASE_CHANNEL})
+ --stable-channel Equivalent to "--release-channel stable"
+ --nightly-channel Equivalent to "--release-channel nightly"
+ --no-updates Do not enable automatic updates (default: enable automatic updates using the best supported scheduling method)
+ --auto-update Enable automatic updates.
+ --auto-update-type Specify a particular scheduling type for auto-updates (valid types: systemd, interval, crontab)
+ --disable-telemetry Opt-out of anonymous statistics.
+ --native-only Only install if native binary packages are available.
+ --static-only Only install if a static build is available.
+ --build-only Only install using a local build.
+ --disable-cloud Disable support for Netdata Cloud (default: detect)
+ --require-cloud Only install if Netdata Cloud can be enabled. Overrides --disable-cloud.
+ --install-prefix <path> Specify an installation prefix for local builds (default: autodetect based on system type).
+  --old-install-prefix <path> Specify the installation prefix of an existing local build for uninstall/reinstall (if it is not the default).
+ --install-version <version> Specify the version of Netdata to install.
+ --claim-token Use a specified token for claiming to Netdata Cloud.
+ --claim-rooms When claiming, add the node to the specified rooms.
+ --claim-* Specify other options for the claiming script.
+ --no-cleanup Don't do any cleanup steps. This is intended to help with debugging the installer.
+ --local-build-options Specify additional options to pass to the installer code when building locally. Only valid if --build-only is also specified.
+ --static-install-options Specify additional options to pass to the static installer code. Only valid if --static-only is also specified.
+
+The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install:
+
+ --reinstall If there is an existing install, reinstall it instead of trying to update it. If there is no existing install, install netdata normally.
+ --reinstall-even-if-unsafe If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things. If there is no existing install, install Netdata normally.
+ --reinstall-clean If there is an existing install, uninstall it before trying to install Netdata. Fails if there is no existing install.
+ --uninstall Uninstall an existing installation of Netdata. Fails if there is no existing install.
+ --claim-only If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally.
+ --repositories-only Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets --native-only.
+ --prepare-offline-install-source Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything.
+
+Additionally, this script may use the following environment variables:
+
+ TMPDIR: Used to specify where to put temporary files. On most systems, the default we select
+ automatically should be fine. The user running the script needs to both be able to
+ write files to the temporary directory, and run files from that location.
+ ROOTCMD: Used to specify a command to use to run another command with root privileges if needed. By
+ default we try to use sudo, doas, or pkexec (in that order of preference), but if
+ you need special options for one of those to work, or have a different tool to do
+ the same thing on your system, you can specify it here.
+  DISABLE_TELEMETRY: If set to a value other than 0, behave as if \`--disable-telemetry\` was specified.
+
+HEREDOC
+}
+
+# ======================================================================
+# Telemetry functions
+
+telemetry_event() {
+ if [ "${NETDATA_DISABLE_TELEMETRY}" -eq 1 ] || [ "${DRY_RUN}" -eq 1 ]; then
+ return 0
+ fi
+
+ now="$(date +%s)"
+ total_duration="$((now - START_TIME))"
+
+ if [ -e "/etc/os-release" ]; then
+ eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" < /etc/os-release | sed 's/^/HOST_/')"
+ fi
+
+ if [ -z "${HOST_NAME}" ] || [ -z "${HOST_VERSION}" ] || [ -z "${HOST_ID}" ]; then
+ if [ -f "/etc/lsb-release" ]; then
+ DISTRIB_ID="unknown"
+ DISTRIB_RELEASE="unknown"
+ DISTRIB_CODENAME="unknown"
+ eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" < /etc/lsb-release)"
+ if [ -z "${HOST_NAME}" ]; then HOST_NAME="${DISTRIB_ID}"; fi
+ if [ -z "${HOST_VERSION}" ]; then HOST_VERSION="${DISTRIB_RELEASE}"; fi
+ if [ -z "${HOST_ID}" ]; then HOST_ID="${DISTRIB_CODENAME}"; fi
+ fi
+ fi
+
+ KERNEL_NAME="$(uname -s)"
+
+ if [ "${KERNEL_NAME}" = FreeBSD ]; then
+ TOTAL_RAM="$(sysctl -n hw.physmem)"
+ elif [ "${KERNEL_NAME}" = Darwin ]; then
+ TOTAL_RAM="$(sysctl -n hw.memsize)"
+ elif [ -r /proc/meminfo ]; then
+ TOTAL_RAM="$(grep -F MemTotal /proc/meminfo | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | cut -f 1 -d ' ')"
+ TOTAL_RAM="$((TOTAL_RAM * 1024))"
+ fi
+
+ MD5_PATH="$(exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum)"
+
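+  # Build an anonymized, stable machine identifier: prefer the hardware UUID on macOS,
+  # then a hash of the machine-id, then a freshly generated UUID, falling back to "null".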
+ if [ "${KERNEL_NAME}" = Darwin ] && command -v ioreg >/dev/null 2>&1; then
+ DISTINCT_ID="macos-$(ioreg -rd1 -c IOPlatformExpertDevice | awk '/IOPlatformUUID/ { split($0, line, "\""); printf("%s\n", line[4]); }')"
+ elif [ -f /etc/machine-id ] && [ -n "$MD5_PATH" ]; then
+ DISTINCT_ID="machine-$($MD5_PATH < /etc/machine-id | cut -f1 -d" ")"
+ elif [ -f /var/db/dbus/machine-id ] && [ -n "$MD5_PATH" ]; then
+ DISTINCT_ID="dbus-$($MD5_PATH < /var/db/dbus/machine-id | cut -f1 -d" ")"
+ elif [ -f /var/lib/dbus/machine-id ] && [ -n "$MD5_PATH" ]; then
+ DISTINCT_ID="dbus-$($MD5_PATH < /var/lib/dbus/machine-id | cut -f1 -d" ")"
+ elif command -v uuidgen > /dev/null 2>&1; then
+ DISTINCT_ID="uuid-$(uuidgen | tr '[:upper:]' '[:lower:]')"
+ else
+ DISTINCT_ID="null"
+ fi
+
+ REQ_BODY="$(cat << EOF
+{
+ "event": "${1}",
+ "properties": {
+ "distinct_id": "${DISTINCT_ID}",
+ "event_source": "agent installer",
+ "\$current_url": "agent installer",
+ "\$pathname": "netdata-installer",
+ "\$host": "installer.netdata.io",
+ "\$ip": "127.0.0.1",
+ "script_variant": "kickstart-ng",
+ "error_code": "${3}",
+ "error_message": "${2}",
+ "install_options": "${KICKSTART_OPTIONS}",
+ "install_interactivity": "${INTERACTIVE}",
+ "install_auto_updates": "${NETDATA_AUTO_UPDATES}",
+ "install_command": "${NETDATA_COMMAND}",
+ "total_runtime": "${total_duration}",
+ "selected_install_method": "${SELECTED_INSTALL_METHOD}",
+ "netdata_release_channel": "${RELEASE_CHANNEL:-null}",
+ "netdata_install_type": "${INSTALL_TYPE}",
+ "host_os_name": "${HOST_NAME:-unknown}",
+ "host_os_id": "${HOST_ID:-unknown}",
+ "host_os_id_like": "${HOST_ID_LIKE:-unknown}",
+ "host_os_version": "${HOST_VERSION:-unknown}",
+ "host_os_version_id": "${HOST_VERSION_ID:-unknown}",
+ "system_kernel_name": "${KERNEL_NAME}",
+ "system_kernel_version": "$(uname -r)",
+ "system_architecture": "$(uname -m)",
+ "system_total_ram": "${TOTAL_RAM:-unknown}",
+ "system_distinct_id": "${DISTINCT_ID}"
+ }
+}
+EOF
+)"
+
+ if [ -n "${CURL}" ]; then
+ "${CURL}" --silent -o /dev/null -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" "${TELEMETRY_URL}" > /dev/null
+ elif command -v wget > /dev/null 2>&1; then
+ if wget --help 2>&1 | grep BusyBox > /dev/null 2>&1; then
+      # BusyBox-compatible version of wget; it has no --no-check-certificate option
+ wget -q -O - \
+ -T 1 \
+ --header 'Content-Type: application/json' \
+ --post-data "${REQ_BODY}" \
+ "${TELEMETRY_URL}" > /dev/null
+ else
+ wget -q -O - --no-check-certificate \
+ --method POST \
+ --timeout=1 \
+ --header 'Content-Type: application/json' \
+ --body-data "${REQ_BODY}" \
+ "${TELEMETRY_URL}" > /dev/null
+ fi
+ fi
+}
+
+trap_handler() {
+ code="${1}"
+ lineno="${2}"
+
+ deferred_warnings
+
+ printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ERROR ${TPUT_RESET} Installer exited unexpectedly (${code}-${lineno})"
+
+ case "${code}" in
+ 0) printf >&2 "%s\n" "This is almost certainly the result of a bug. If you have time, please report it at ${AGENT_BUG_REPORT_URL}." ;;
+ *)
+ printf >&2 "%s\n" "This is probably a result of a transient issue on your system. Things should work correctly if you try again."
+      printf >&2 "%s\n" "If you continue to experience this issue, you can reach out to us for support on:"
+ support_list
+ ;;
+ esac
+
+ telemetry_event INSTALL_CRASH "Installer exited unexpectedly (${code}-${lineno})" "E${code}-${lineno}"
+
+ trap - EXIT
+
+ cleanup
+
+ exit 1
+}
+
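+# Route unexpected exits and common fatal signals through the trap handler so that
+# deferred warnings are printed and a crash telemetry event is sent.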
+trap 'trap_handler 0 ${LINENO}' EXIT
+trap 'trap_handler 1 0' HUP
+trap 'trap_handler 2 0' INT
+trap 'trap_handler 3 0' QUIT
+trap 'trap_handler 13 0' PIPE
+trap 'trap_handler 15 0' TERM
+
+# ======================================================================
+# Utility functions
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_WHITE=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput > /dev/null 2>&1; then
+ if num_colors=$(tput colors 2> /dev/null) && [ "${num_colors:-0}" -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ fi
+ fi
+
+ echo "${TPUT_RESET}"
+
+ return 0
+}
+
+support_list() {
+ printf >&2 "%s\n" " - GitHub: ${DISCUSSIONS_URL}"
+ printf >&2 "%s\n" " - Discord: ${DISCORD_INVITE}"
+ printf >&2 "%s\n" " - Our community forums: ${FORUM_URL}"
+}
+
+success_banner() {
+ printf >&2 "%s\n\n" "Official documentation can be found online at ${DOCS_URL}."
+
+ if [ -z "${CLAIM_TOKEN}" ]; then
+ printf >&2 "%s\n\n" "Looking to monitor all of your infrastructure with Netdata? Check out Netdata Cloud at ${PUBLIC_CLOUD_URL}."
+ fi
+
+ printf >&2 "%s\n" "Join our community and connect with us on:"
+ support_list
+}
+
+cleanup() {
+ if [ -z "${NO_CLEANUP}" ] && [ -n "${tmpdir}" ]; then
+ cd || true
+ DRY_RUN=0
+ run_as_root rm -rf "${tmpdir}"
+ fi
+}
+
+deferred_warnings() {
+ if [ -n "${NETDATA_WARNINGS}" ]; then
+ printf >&2 "%s\n" "The following non-fatal warnings or errors were encountered:"
+ # shellcheck disable=SC2059
+ printf >&2 "${NETDATA_WARNINGS}"
+ printf >&2 "\n\n"
+ fi
+}
+
+fatal() {
+ deferred_warnings
+ printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${1}"
+ printf >&2 "%s\n" "For community support, you can connect with us on:"
+ support_list
+ telemetry_event "INSTALL_FAILED" "${1}" "${2}"
+ cleanup
+ trap - EXIT
+ exit 1
+}
+
+ESCAPED_PRINT_METHOD=
+# shellcheck disable=SC3050
+if printf "%q " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ # shellcheck disable=SC3050
+    printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+progress() {
+ echo >&2 " --- ${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+run_logfile="/dev/null"
+run() {
+ user="${USER--}"
+ dir="${PWD}"
+
+ if [ "$(id -u)" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+    printf >&2 "%s\n" "Would run command:"
+ fi
+
+ {
+ printf "%s" "${info}"
+ escaped_print "${@}"
+ printf " ... "
+ } >> "${run_logfile}"
+
+ printf >&2 "%s" "${info_console}${TPUT_BOLD}"
+ escaped_print >&2 "${@}"
+ printf >&2 "%s\n" "${TPUT_RESET}"
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ "${@}"
+ ret=$?
+ else
+ ret=0
+ fi
+
+ if [ ${ret} -ne 0 ]; then
+ printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET}"
+ printf "%s\n" "FAILED with exit code ${ret}" >> "${run_logfile}"
+ # shellcheck disable=SC2089
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - Command \"${*}\" failed with exit code ${ret}."
+ else
+ printf >&2 "%s\n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET}"
+ printf "OK\n" >> "${run_logfile}"
+ fi
+
+ return ${ret}
+}
+
+run_as_root() {
+ confirm_root_support
+
+ if [ "$(id -u)" -ne "0" ]; then
+ printf >&2 "Root privileges required to run %s\n" "${*}"
+ fi
+
+ run ${ROOTCMD} "${@}"
+}
+
+run_script() {
+ set_tmpdir
+
+ export NETDATA_SCRIPT_STATUS_PATH="${tmpdir}/.script-status"
+
+ export NETDATA_SAVE_WARNINGS=1
+ export NETDATA_PROPAGATE_WARNINGS=1
+ # shellcheck disable=SC2090
+ export NETDATA_WARNINGS="${NETDATA_WARNINGS}"
+
+ # shellcheck disable=SC2086
+ run ${ROOTCMD} "${@}"
+
+ if [ -r "${NETDATA_SCRIPT_STATUS_PATH}" ]; then
+ # shellcheck disable=SC1090
+ . "${NETDATA_SCRIPT_STATUS_PATH}"
+ rm -f "${NETDATA_SCRIPT_STATUS_PATH}"
+ fi
+}
+
+warning() {
+ printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} WARNING ${TPUT_RESET} ${*}"
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - ${*}"
+}
+
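+# Returns 0 (true) if the given directory cannot be used for temporary files,
+# i.e. if we cannot create, write, and execute a test script inside it.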
+_cannot_use_tmpdir() {
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ]; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}"; then
+ if chmod +x "${testfile}"; then
+ if [ "$("${testfile}")" = "SUCCESS" ]; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}"; then
+ if _cannot_use_tmpdir /tmp; then
+ if _cannot_use_tmpdir "${PWD}"; then
+ fatal "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again." F0400
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+
+ mktemp -d -t netdata-kickstart-XXXXXXXXXX
+}
+
+set_tmpdir() {
+ if [ -z "${tmpdir}" ] || [ ! -d "${tmpdir}" ]; then
+ tmpdir="$(create_tmp_directory)"
+ progress "Using ${tmpdir} as a temporary directory."
+ cd "${tmpdir}" || fatal "Failed to change current working directory to ${tmpdir}." F000A
+ fi
+}
+
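+# Check whether a remote file (or a file:// URL) exists without downloading it.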
+check_for_remote_file() {
+ url="${1}"
+
+ if echo "${url}" | grep -Eq "^file:///"; then
+ [ -e "${url#file://}" ] || return 1
+ elif [ -n "${CURL}" ]; then
+ "${CURL}" --output /dev/null --silent --head --fail "${url}" || return 1
+ elif command -v wget > /dev/null 2>&1; then
+ wget -S --spider "${url}" 2>&1 | grep -q 'HTTP/1.1 200 OK' || return 1
+ else
+ fatal "${ERROR_F0003}" F0003
+ fi
+}
+
+download() {
+ url="${1}"
+ dest="${2}"
+
+ if echo "${url}" | grep -Eq "^file:///"; then
+ run cp "${url#file://}" "${dest}" || return 1
+ elif [ -n "${CURL}" ]; then
+ run "${CURL}" --fail -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}" || return 1
+ elif command -v wget > /dev/null 2>&1; then
+ run wget -T 15 -O "${dest}" "${url}" || return 1
+ else
+ fatal "${ERROR_F0003}" F0003
+ fi
+}
+
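+# Query the release info service for the concrete version matching a major version and
+# channel; prints "NONE" if no such release exists, or an empty string if the service is unreachable.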
+get_actual_version() {
+ major="${1}"
+ channel="${2}"
+ url="${RELEASE_INFO_URL}/${channel}/${major}"
+
+ if check_for_remote_file "${RELEASE_INFO_URL}"; then
+ if check_for_remote_file "${url}"; then
+ download "${url}" -
+ else
+ echo "NONE"
+ fi
+ else
+ echo ""
+ fi
+}
+
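+# Follow a URL's redirects and print only the final path component (used to find the
+# latest release tag on GitHub).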
+get_redirect() {
+ url="${1}"
+
+ if [ -n "${CURL}" ]; then
+ run sh -c "${CURL} ${url} -s -L -I -o /dev/null -w '%{url_effective}' | grep -o '[^/]*$'" || return 1
+ elif command -v wget > /dev/null 2>&1; then
+ run sh -c "wget -S -O /dev/null ${url} 2>&1 | grep -m 1 Location | grep -o '[^/]*$'" || return 1
+ else
+ fatal "${ERROR_F0003}" F0003
+ fi
+}
+
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option, which is common to both commands
+ # We will have to reconsider if we start using non-common options
+ if command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ elif command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ else
+    fatal "Could not find a usable checksum tool. Either sha256sum or a version of shasum supporting SHA256 checksums is required to proceed with installation." F0004
+ fi
+}
+
+get_system_info() {
+ SYSARCH="$(uname -m)"
+
+ case "$(uname -s)" in
+ Linux)
+ SYSTYPE="Linux"
+
+ if [ -z "${SKIP_DISTRO_DETECTION}" ]; then
+ os_release_file=
+ if [ -s "/etc/os-release" ] && [ -r "/etc/os-release" ]; then
+ os_release_file="/etc/os-release"
+ elif [ -s "/usr/lib/os-release" ] && [ -r "/usr/lib/os-release" ]; then
+ os_release_file="/usr/lib/os-release"
+ else
+ warning "Cannot find usable OS release information. Native packages will not be available for this install."
+ fi
+
+ if [ -n "${os_release_file}" ]; then
+ # shellcheck disable=SC1090
+ . "${os_release_file}"
+
+ DISTRO="${ID}"
+ SYSVERSION="${VERSION_ID}"
+ SYSCODENAME="${VERSION_CODENAME}"
+ else
+ DISTRO="unknown"
+ DISTRO_COMPAT_NAME="unknown"
+ SYSVERSION="unknown"
+ SYSCODENAME="unknown"
+ fi
+ else
+ warning "Distribution auto-detection overridden by user. This is not guaranteed to work, and is not officially supported."
+ fi
+
+ supported_compat_names="debian ubuntu centos fedora opensuse ol amzn arch"
+
+ if str_in_list "${DISTRO}" "${supported_compat_names}"; then
+ DISTRO_COMPAT_NAME="${DISTRO}"
+ else
+ case "${DISTRO}" in
+ opensuse-leap)
+ DISTRO_COMPAT_NAME="opensuse"
+ ;;
+ opensuse-tumbleweed)
+ DISTRO_COMPAT_NAME="opensuse"
+ SYSVERSION="tumbleweed"
+ ;;
+ cloudlinux|almalinux|centos-stream|rocky|rhel)
+ DISTRO_COMPAT_NAME="centos"
+ ;;
+ artix|manjaro|obarun)
+ DISTRO_COMPAT_NAME="arch"
+ ;;
+ *)
+ DISTRO_COMPAT_NAME="unknown"
+ ;;
+ esac
+ fi
+
+ case "${DISTRO_COMPAT_NAME}" in
+ centos|ol) SYSVERSION=$(echo "$SYSVERSION" | cut -d'.' -f1) ;;
+ esac
+ ;;
+ Darwin)
+ SYSTYPE="Darwin"
+ SYSVERSION="$(sw_vers -buildVersion)"
+ ;;
+ FreeBSD)
+ SYSTYPE="FreeBSD"
+ SYSVERSION="$(uname -K)"
+ ;;
+ *) fatal "Unsupported system type detected. Netdata cannot be installed on this system using this script." F0200 ;;
+ esac
+}
+
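+# Check whether the first argument appears as a word in the space-separated list
+# passed as the second argument.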
+str_in_list() {
+ printf "%s\n" "${2}" | tr ' ' "\n" | grep -qE "^${1}\$"
+ return $?
+}
+
+confirm_root_support() {
+ if [ "$(id -u)" -ne "0" ]; then
+ if [ -z "${ROOTCMD}" ] && command -v sudo > /dev/null; then
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ ROOTCMD="sudo -n"
+ else
+ ROOTCMD="sudo"
+ fi
+ fi
+
+ if [ -z "${ROOTCMD}" ] && command -v doas > /dev/null; then
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ ROOTCMD="doas -n"
+ else
+ ROOTCMD="doas"
+ fi
+ fi
+
+ if [ -z "${ROOTCMD}" ] && command -v pkexec > /dev/null; then
+ ROOTCMD="pkexec"
+ fi
+
+ if [ -z "${ROOTCMD}" ]; then
+ fatal "This script needs root privileges to install Netdata, but cannot find a way to gain them (we support sudo, doas, and pkexec). Either re-run this script as root, or set \$ROOTCMD to a command that can be used to gain root privileges." F0201
+ fi
+ fi
+}
+
+confirm() {
+ prompt="${1} [y/n]"
+
+ while true; do
+ echo "${prompt}"
+ read -r yn
+
+ case "$yn" in
+ [Yy]*) return 0;;
+ [Nn]*) return 1;;
+ *) echo "Please answer yes or no.";;
+ esac
+ done
+}
+
+# ======================================================================
+# Existing install handling code
+
+update() {
+ updater="${ndprefix}/usr/libexec/netdata/netdata-updater.sh"
+
+ if run_as_root test -x "${updater}"; then
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to update existing installation by running the updater script located at: ${updater}"
+ return 0
+ fi
+
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ opts="--non-interactive"
+ else
+ opts="--interactive"
+ fi
+
+ if run_script "${updater}" ${opts} --not-running-from-cron; then
+ progress "Updated existing install at ${ndprefix}"
+ return 0
+ else
+ if [ -n "${EXIT_REASON}" ]; then
+ fatal "Failed to update existing Netdata install at ${ndprefix}: ${EXIT_REASON}" "${EXIT_CODE}"
+ else
+ fatal "Failed to update existing Netdata install at ${ndprefix}: Encountered an unhandled error in the updater. Further information about this error may be displayed above." U0000
+ fi
+ fi
+ else
+ warning "Could not find a usable copy of the updater script. We are unable to update this system in place."
+ return 1
+ fi
+}
+
+uninstall() {
+ set_tmpdir
+ get_system_info
+ detect_existing_install
+
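+  # With --old-install-prefix, the actual install lives in a "netdata" subdirectory of
+  # the given prefix; otherwise reuse the prefix detected above.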
+ if [ -n "${OLD_INSTALL_PREFIX}" ]; then
+ INSTALL_PREFIX="$(echo "${OLD_INSTALL_PREFIX}/" | sed 's/$/netdata/g')"
+ else
+ INSTALL_PREFIX="${ndprefix}"
+ fi
+
+ uninstaller="${INSTALL_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh"
+ uninstaller_url="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh"
+
+ if [ $INTERACTIVE = 0 ]; then
+ FLAGS="--yes --force"
+ else
+ FLAGS="--yes"
+ fi
+
+ if run_as_root test -x "${uninstaller}"; then
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to uninstall existing install with uninstaller script found at: ${uninstaller}"
+ return 0
+ else
+      progress "Found existing netdata-uninstaller. Running it..."
+ # shellcheck disable=SC2086
+ if ! run_script "${uninstaller}" ${FLAGS}; then
+ warning "Uninstaller failed. Some parts of Netdata may still be present on the system."
+ fi
+ fi
+ else
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would download installer script from: ${uninstaller_url}"
+ progress "Would attempt to uninstall existing install with downloaded uninstaller script."
+ return 0
+ else
+ progress "Downloading netdata-uninstaller ..."
+ download "${uninstaller_url}" "${tmpdir}/netdata-uninstaller.sh"
+ chmod +x "${tmpdir}/netdata-uninstaller.sh"
+ # shellcheck disable=SC2086
+ if ! run_script "${tmpdir}/netdata-uninstaller.sh" ${FLAGS}; then
+ warning "Uninstaller failed. Some parts of Netdata may still be present on the system."
+ fi
+ fi
+ fi
+}
+
+detect_existing_install() {
+ set_tmpdir
+
+ progress "Checking for existing installations of Netdata..."
+
+ if pkg_installed netdata; then
+ ndprefix="/"
+ EXISTING_INSTALL_IS_NATIVE="1"
+ else
+ EXISTING_INSTALL_IS_NATIVE="0"
+ if [ -n "${INSTALL_PREFIX}" ]; then
+ searchpath="${INSTALL_PREFIX}/bin:${INSTALL_PREFIX}/sbin:${INSTALL_PREFIX}/usr/bin:${INSTALL_PREFIX}/usr/sbin:${PATH}"
+ searchpath="${INSTALL_PREFIX}/netdata/bin:${INSTALL_PREFIX}/netdata/sbin:${INSTALL_PREFIX}/netdata/usr/bin:${INSTALL_PREFIX}/netdata/usr/sbin:${searchpath}"
+ else
+ searchpath="${PATH}"
+ fi
+
+ ndpath="$(PATH="${searchpath}" command -v netdata 2>/dev/null)"
+
+ if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ]; then
+ ndpath="/opt/netdata/bin/netdata"
+ fi
+
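+    # Derive the install prefix from the binary's location: strip two path components,
+    # or three when the binary lives under a usr/bin or usr/sbin hierarchy.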
+ if [ -n "${ndpath}" ]; then
+ case "${ndpath}" in
+ */usr/bin/netdata|*/usr/sbin/netdata) ndprefix="$(dirname "$(dirname "$(dirname "${ndpath}")")")" ;;
+ *) ndprefix="$(dirname "$(dirname "${ndpath}")")" ;;
+ esac
+ fi
+
+ if echo "${ndprefix}" | grep -Eq '^/usr$'; then
+ ndprefix="$(dirname "${ndprefix}")"
+ fi
+ fi
+
+ if [ -n "${ndprefix}" ]; then
+ typefile="${ndprefix}/etc/netdata/.install-type"
+ if [ -r "${typefile}" ]; then
+ # shellcheck disable=SC1090,SC1091
+ . "${typefile}"
+ else
+ INSTALL_TYPE="unknown"
+ fi
+
+ envfile="${ndprefix}/etc/netdata/.environment"
+ if [ "${INSTALL_TYPE}" = "unknown" ] || [ "${INSTALL_TYPE}" = "custom" ]; then
+ if [ -r "${envfile}" ]; then
+ # shellcheck disable=SC1090,SC1091
+ . "${envfile}"
+ if [ -n "${NETDATA_IS_STATIC_INSTALL}" ]; then
+ if [ "${NETDATA_IS_STATIC_INSTALL}" = "yes" ]; then
+ INSTALL_TYPE="legacy-static"
+ else
+ INSTALL_TYPE="legacy-build"
+ fi
+ fi
+ fi
+ fi
+ fi
+}
+
+handle_existing_install() {
+ detect_existing_install
+
+ if [ -z "${ndprefix}" ] || [ -z "${INSTALL_TYPE}" ]; then
+ progress "No existing installations of netdata found, assuming this is a fresh install."
+ return 0
+ fi
+
+ case "${INSTALL_TYPE}" in
+ kickstart-*|legacy-*|binpkg-*|manual-static|unknown)
+ if [ "${INSTALL_TYPE}" = "unknown" ]; then
+ if [ "${EXISTING_INSTALL_IS_NATIVE}" -eq 1 ]; then
+ warning "Found an existing netdata install managed by the system package manager, but could not determine the install type. Usually this means you installed an unsupported third-party netdata package. This script supports claiming most such installs, but attempting to update or reinstall them using this script may be dangerous."
+ else
+ warning "Found an existing netdata install at ${ndprefix}, but could not determine the install type. Usually this means you installed Netdata through your distribution’s regular package repositories or some other unsupported method."
+ fi
+ else
+ progress "Found an existing netdata install at ${ndprefix}, with installation type '${INSTALL_TYPE}'."
+ fi
+
+ if [ "${ACTION}" = "reinstall" ] || [ "${ACTION}" = "unsafe-reinstall" ]; then
+ progress "Found an existing netdata install at ${ndprefix}, but user requested reinstall, continuing."
+
+ case "${INSTALL_TYPE}" in
+ binpkg-*) NETDATA_FORCE_METHOD='native' ;;
+ *-build) NETDATA_FORCE_METHOD='build' ;;
+ *-static) NETDATA_FORCE_METHOD='static' ;;
+ *)
+ if [ "${ACTION}" = "unsafe-reinstall" ]; then
+ warning "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, but the user has requested we proceed."
+ elif [ "${INTERACTIVE}" -eq 0 ]; then
+ fatal "User requested reinstall, but we cannot safely reinstall over top of a ${INSTALL_TYPE} installation, exiting." F0104
+ else
+ if [ "${EXISTING_INSTALL_IS_NATIVE}" ]; then
+ reinstall_prompt="Reinstalling over top of an existing install managed by the system package manager is known to cause things to break, are you sure you want to continue?"
+ else
+ reinstall_prompt="Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, do you want to continue?"
+ fi
+
+ if confirm "${reinstall_prompt}"; then
+ progress "OK, continuing."
+ else
+ fatal "Cancelling reinstallation at user request." F0105
+ fi
+ fi
+ ;;
+ esac
+
+ return 0
+ elif [ "${INSTALL_TYPE}" = "unknown" ]; then
+ claimonly_notice="If you just want to claim this install, you should re-run this command with the --claim-only option instead."
+ if [ "${EXISTING_INSTALL_IS_NATIVE}" -eq 1 ]; then
+ failmsg="Attempting to update an installation managed by the system package manager is known to not work in most cases. If you are trying to install the latest version of Netdata, you will need to manually uninstall it through your system package manager. ${claimonly_notice}"
+ promptmsg="Attempting to update an installation managed by the system package manager is known to not work in most cases. If you are trying to install the latest version of Netdata, you will need to manually uninstall it through your system package manager. ${claimonly_notice} Are you sure you want to continue?"
+ else
+ failmsg="We do not support trying to update or claim installations when we cannot determine the install type. You will need to uninstall the existing install using the same method you used to install it to proceed. ${claimonly_notice}"
+ promptmsg="Attempting to update an existing install with an unknown installation type is not officially supported. It may work, but it also might break your system. ${claimonly_notice} Are you sure you want to continue?"
+ fi
+ if [ "${INTERACTIVE}" -eq 0 ] && [ "${ACTION}" != "claim" ]; then
+ fatal "${failmsg}" F0106
+ elif [ "${INTERACTIVE}" -eq 1 ] && [ "${ACTION}" != "claim" ]; then
+ if confirm "${promptmsg}"; then
+ progress "OK, continuing"
+ else
+ fatal "Cancelling update of unknown installation type at user request." F050C
+ fi
+ fi
+ fi
+
+ ret=0
+
+ if [ "${ACTION}" != "claim" ]; then
+ if ! update; then
+ warning "Failed to update existing Netdata install at ${ndprefix}."
+ else
+ progress "Successfully updated existing netdata install at ${ndprefix}."
+ fi
+ else
+ warning "Not updating existing install at ${ndprefix}."
+ fi
+
+ if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+ progress "Attempting to claim existing install at ${ndprefix}."
+ INSTALL_PREFIX="${ndprefix}"
+ claim
+ ret=$?
+ elif [ "${ACTION}" = "claim" ]; then
+ fatal "User asked to claim, but did not provide a claiming token." F0202
+ else
+ progress "Not attempting to claim existing install at ${ndprefix} (no claiming token provided)."
+ fi
+
+ deferred_warnings
+ success_banner
+ cleanup
+ trap - EXIT
+ exit $ret
+ ;;
+ oci)
+ fatal "This is an OCI container, use the regular container lifecycle management commands for your container tools instead of this script for managing it." F0203
+ ;;
+ *)
+ if [ "${ACTION}" = "reinstall" ] || [ "${ACTION}" = "unsafe-reinstall" ]; then
+ if [ "${ACTION}" = "unsafe-reinstall" ]; then
+ warning "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, but the user has requested we proceed."
+ elif [ "${INTERACTIVE}" -eq 0 ]; then
+ fatal "User requested reinstall, but we cannot safely reinstall over top of a ${INSTALL_TYPE} installation, exiting." F0104
+ else
+ if confirm "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, do you want to continue?"; then
+ progress "OK, continuing."
+ else
+ fatal "Cancelling reinstallation at user request." F0105
+ fi
+ fi
+ else
+ if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+ progress "Attempting to claim existing install at ${ndprefix}."
+ INSTALL_PREFIX="${ndprefix}"
+ claim
+ ret=$?
+
+ cleanup
+ trap - EXIT
+ exit $ret
+ elif [ "${ACTION}" = "claim" ]; then
+ fatal "User asked to claim, but did not provide a claiming token." F0202
+ else
+ fatal "Found an existing netdata install at ${ndprefix}, but the install type is '${INSTALL_TYPE}', which is not supported by this script, refusing to proceed." F0103
+ fi
+ fi
+ ;;
+ esac
+}
+
+soft_disable_cloud() {
+ set_tmpdir
+
+ cloud_prefix="${INSTALL_PREFIX}/var/lib/netdata/cloud.d"
+
+ run_as_root mkdir -p "${cloud_prefix}"
+
+ cat > "${tmpdir}/cloud.conf" << EOF
+[global]
+ enabled = no
+EOF
+
+ run_as_root cp "${tmpdir}/cloud.conf" "${cloud_prefix}/cloud.conf"
+
+ if [ -z "${NETDATA_NO_START}" ]; then
+ case "${SYSTYPE}" in
+ Darwin) run_as_root launchctl kickstart -k com.github.netdata ;;
+ FreeBSD) run_as_root service netdata restart ;;
+ Linux)
+ initpath="$(run_as_root readlink /proc/1/exe)"
+
+ if command -v service > /dev/null 2>&1; then
+ run_as_root service netdata restart
+ elif command -v rc-service > /dev/null 2>&1; then
+ run_as_root rc-service netdata restart
+ elif [ "$(basename "${initpath}" 2> /dev/null)" = "systemd" ]; then
+ run_as_root systemctl restart netdata
+ elif [ -f /etc/init.d/netdata ]; then
+ run_as_root /etc/init.d/netdata restart
+ fi
+ ;;
+ esac
+ fi
+}
+
+confirm_install_prefix() {
+ if [ -n "${INSTALL_PREFIX}" ] && [ "${NETDATA_FORCE_METHOD}" != 'build' ]; then
+ fatal "The --install-prefix option is only supported together with the --build-only option." F0204
+ fi
+
+ if [ -n "${INSTALL_PREFIX}" ]; then
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install-prefix ${INSTALL_PREFIX}"
+ else
+ case "${SYSTYPE}" in
+ Darwin)
+ INSTALL_PREFIX="/usr/local/netdata"
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install-no-prefix ${INSTALL_PREFIX}"
+ ;;
+ FreeBSD)
+ INSTALL_PREFIX="/usr/local"
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install-no-prefix ${INSTALL_PREFIX}"
+ ;;
+ esac
+ fi
+}
+
+# ======================================================================
+# Claiming support code
+
+check_claim_opts() {
+# shellcheck disable=SC2235,SC2030
+ if [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_ROOMS}" ]; then
+ fatal "Invalid claiming options, claim rooms may only be specified when a token is specified." F0204
+ elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}" ]; then
+ fatal "Invalid claiming options, a claiming token must be specified." F0204
+ elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ] && [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+ fatal "Cloud explicitly disabled, but automatic claiming requested. Either enable Netdata Cloud, or remove the --claim-* options." F0204
+ fi
+}
+
+is_netdata_running() {
+ if command -v pgrep > /dev/null 2>&1; then
+ if pgrep netdata; then
+ return 0
+ else
+ return 1
+ fi
+ else
+ if [ -z "${INSTALL_PREFIX}" ]; then
+ NETDATACLI_PATH=/usr/sbin/netdatacli
+ elif [ "${INSTALL_PREFIX}" = "/opt/netdata" ]; then
+ NETDATACLI_PATH="/opt/netdata/bin/netdatacli"
+ else
+ NETDATACLI_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdatacli"
+ fi
+
+ if "${NETDATACLI_PATH}" ping > /dev/null 2>&1; then
+ return 0
+ else
+ return 1
+ fi
+ fi
+}
+
+claim() {
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to claim agent to ${NETDATA_CLAIM_URL}"
+ else
+ progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}"
+ fi
+
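+  # Locate netdata-claim.sh: prefer a copy already on the PATH, then fall back to the
+  # well-known locations for the various install prefixes and layouts.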
+ if command -v netdata-claim.sh > /dev/null 2>&1; then
+ NETDATA_CLAIM_PATH="$(command -v netdata-claim.sh)"
+ elif [ -z "${INSTALL_PREFIX}" ] || [ "${INSTALL_PREFIX}" = "/" ]; then
+ NETDATA_CLAIM_PATH=/usr/sbin/netdata-claim.sh
+ elif [ "${INSTALL_PREFIX}" = "/opt/netdata" ]; then
+ NETDATA_CLAIM_PATH="/opt/netdata/bin/netdata-claim.sh"
+ elif [ ! -d "${INSTALL_PREFIX}/netdata" ]; then
+ if [ -d "${INSTALL_PREFIX}/usr" ]; then
+ NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/usr/sbin/netdata-claim.sh"
+ else
+ NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/sbin/netdata-claim.sh"
+ fi
+ else
+ NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh"
+ fi
+
+ err_msg=
+ err_code=
+ if [ -z "${NETDATA_CLAIM_PATH}" ]; then
+ err_msg="Unable to claim node: could not find usable claiming script. Reinstalling Netdata may resolve this."
+ err_code=F050B
+ elif [ ! -e "${NETDATA_CLAIM_PATH}" ]; then
+ err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} does not exist."
+ err_code=F0512
+ elif [ ! -f "${NETDATA_CLAIM_PATH}" ]; then
+ err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} is not a file."
+ err_code=F0513
+ elif [ ! -x "${NETDATA_CLAIM_PATH}" ]; then
+ err_msg="Unable to claim node: claiming script at ${NETDATA_CLAIM_PATH} is not executable. Reinstalling Netdata may resolve this."
+ err_code=F0514
+ fi
+
+ if [ -n "$err_msg" ]; then
+ if [ "${ACTION}" = "claim" ]; then
+ fatal "$err_msg" "$err_code"
+ else
+ warning "$err_msg"
+ return 1
+ fi
+ fi
+
+ if ! is_netdata_running; then
+ NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running"
+ fi
+
+ # shellcheck disable=SC2086
+ run_as_root "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA}
+ case $? in
+ 0)
+ progress "Successfully claimed node"
+ return 0
+ ;;
+ 1) warning "Unable to claim node due to invalid claiming options. If you are seeing this message, you’ve probably found a bug and should open a bug report at ${AGENT_BUG_REPORT_URL}" ;;
+ 2) warning "Unable to claim node due to issues creating the claiming directory or preparing the local claiming key. Make sure you have a working openssl command and that ${INSTALL_PREFIX}/var/lib/netdata/cloud.d exists, then try again." ;;
+ 3) warning "Unable to claim node due to missing dependencies. Usually this means that the Netdata Agent was built without support for Netdata Cloud. If you built the agent from source, please install all needed dependencies for Cloud support. If you used the regular installation script and see this error, please file a bug report at ${AGENT_BUG_REPORT_URL}." ;;
+ 4) warning "Failed to claim node due to inability to connect to ${NETDATA_CLAIM_URL}. Usually this either means that the specified claiming URL is wrong, or that you are having networking problems." ;;
+ 5)
+ progress "Successfully claimed node, but was not able to notify the Netdata Agent. You will need to restart the Netdata service on this node before it will show up in the Cloud."
+ return 0
+ ;;
+ 8) warning "Failed to claim node due to an invalid agent ID. You can usually resolve this by removing ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restarting the agent. Then try to claim it again using the same options." ;;
+ 9) warning "Failed to claim node due to an invalid node name. This probably means you tried to specify a custom name for this node (for example, using the --claim-hostname option), but the hostname itself was either empty or consisted solely of whitespace. You can resolve this by specifying a valid host name and trying again." ;;
+ 10) warning "Failed to claim node due to an invalid room ID. This issue is most likely caused by a typo. Please check if the room(s) you are trying to add appear on the list of rooms provided to the --claim-rooms option ('${NETDATA_CLAIM_ROOMS}'). Then verify if the rooms are visible in Netdata Cloud and try again." ;;
+ 11) warning "Failed to claim node due to an issue with the generated RSA key pair. You can usually resolve this by removing all files in ${INSTALL_PREFIX}/var/lib/netdata/cloud.d and then trying again." ;;
+ 12) warning "Failed to claim node due to an invalid or expired claiming token. Please check that the token specified with the --claim-token option ('${NETDATA_CLAIM_TOKEN}') matches what you see in the Cloud and try again." ;;
+ 13) warning "Failed to claim node because the Cloud thinks it is already claimed. If this node was created by cloning a VM or as a container from a template, please remove the file ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restart the agent. Then try to claim it again with the same options. Otherwise, if you are certain this node has never been claimed before, you can use the --claim-id option to specify a new node ID to use for claiming, for example by using the uuidgen command like so: --claim-id \"\$(uuidgen)\"" ;;
+ 14) warning "Failed to claim node because the node is already in the process of being claimed. You should not need to do anything to resolve this, the node should show up properly in the Cloud soon. If it does not, please report a bug at ${AGENT_BUG_REPORT_URL}." ;;
+ 15|16|17) warning "Failed to claim node due to an internal server error in the Cloud. Please retry claiming this node later, and if you still see this message file a bug report at ${CLOUD_BUG_REPORT_URL}." ;;
+ 18) warning "Unable to claim node because this Netdata installation does not have a unique ID yet. Make sure the agent is running and started up correctly, and then try again." ;;
+ *) warning "Failed to claim node for an unknown reason. This usually means either networking problems or a bug. Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;;
+ esac
+
+ if [ "${ACTION}" = "claim" ]; then
+ deferred_warnings
+ printf >&2 "%s\n" "For community support, you can connect with us on:"
+ support_list
+ cleanup
+ trap - EXIT
+ exit 1
+ fi
+}
+
+# ======================================================================
+# Auto-update handling code.
+set_auto_updates() {
+ if run_as_root test -x "${INSTALL_PREFIX}/usr/libexec/netdata/netdata-updater.sh"; then
+ updater="${INSTALL_PREFIX}/usr/libexec/netdata/netdata-updater.sh"
+ elif run_as_root test -x "${INSTALL_PREFIX}/netdata/usr/libexec/netdata/netdata-updater.sh"; then
+ updater="${INSTALL_PREFIX}/netdata/usr/libexec/netdata/netdata-updater.sh"
+ else
+ warning "Could not find netdata-updater.sh. This means that auto-updates cannot (currently) be enabled on this system. See https://learn.netdata.cloud/docs/agent/packaging/installer/update for more information about updating Netdata."
+ return 0
+ fi
+
+ if [ "${AUTO_UPDATE}" -eq 1 ]; then
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would have attempted to enable automatic updates."
+    # This first case catches a new kickstart script being used with an old build. It can be safely removed after v1.34.0 is released.
+ elif ! run_as_root grep -q '\-\-enable-auto-updates' "${updater}"; then
+ echo
+ elif ! run_as_root "${updater}" --enable-auto-updates "${NETDATA_AUTO_UPDATE_TYPE}"; then
+ warning "Failed to enable auto updates. Netdata will still work, but you will need to update manually."
+ fi
+ else
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would have attempted to disable automatic updates."
+ else
+ run_as_root "${updater}" --disable-auto-updates
+ fi
+ fi
+}
+
+# ======================================================================
+# Native package install code.
+
+# Check for an already installed package with a given name.
+pkg_installed() {
+ case "${SYSTYPE}" in
+ Linux)
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
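+          # dpkg-query prints the package status (e.g. "install ok installed"); we only
+          # check that the first word is "install".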
+ # shellcheck disable=SC2016
+ dpkg-query --show --showformat '${Status}' "${1}" 2>&1 | cut -f 1 -d ' ' | grep -q '^install$'
+ return $?
+ ;;
+ centos|fedora|opensuse|ol|amzn)
+ rpm -q "${1}" > /dev/null 2>&1
+ return $?
+ ;;
+ alpine)
+ apk -e info "${1}" > /dev/null 2>&1
+ return $?
+ ;;
+ arch)
+ pacman -Qi "${1}" > /dev/null 2>&1
+ return $?
+ ;;
+ *) return 1 ;;
+ esac
+ ;;
+ Darwin)
+ if command -v brew > /dev/null 2>&1; then
+ brew list "${1}" > /dev/null 2>&1
+ return $?
+ else
+ return 1
+ fi
+ ;;
+ FreeBSD)
+ if pkg -N > /dev/null 2>&1; then
+ pkg info "${1}" > /dev/null 2>&1
+ return $?
+ else
+ return 1
+ fi
+ ;;
+ *) return 1 ;;
+ esac
+}
+
+# Check for the existence of a usable netdata package in the repo.
+netdata_avail_check() {
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
+ env DEBIAN_FRONTEND=noninteractive apt-cache policy netdata | grep -q repo.netdata.cloud/repos/;
+ return $?
+ ;;
+ centos|fedora|ol|amzn)
+ # shellcheck disable=SC2086
+ ${pm_cmd} search --nogpgcheck -v netdata | grep -qE 'Repo *: netdata(-edge)?$'
+ return $?
+ ;;
+ opensuse)
+ zypper packages -r "$(zypper repos | grep -E 'netdata |netdata-edge ' | cut -f 1 -d '|' | tr -d ' ')" | grep -E 'netdata '
+ return $?
+ ;;
+ *) return 1 ;;
+ esac
+}
+
+# Check for any distro-specific dependencies we know we need.
+check_special_native_deps() {
+ if [ "${DISTRO_COMPAT_NAME}" = "centos" ] && [ "${SYSVERSION}" -gt 6 ]; then
+ progress "EPEL is required on this system, checking if it’s available."
+
+ if ${pm_cmd} search --nogpgcheck -v epel-release | grep -q "No matches found"; then
+ warning "Unable to find a suitable source for libuv, cannot install using native packages on this system."
+ return 1
+ else
+ progress "EPEL is available, attempting to install so that required dependencies are available."
+
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${install_subcmd} ${pkg_install_opts} epel-release; then
+ warning "Failed to install EPEL, even though it is required to install native packages on this system."
+ return 1
+ fi
+ fi
+ fi
+}
+
+common_rpm_opts() {
+ pkg_type="rpm"
+ pkg_suffix=".noarch"
+ pkg_vsep="-"
+ INSTALL_TYPE="binpkg-rpm"
+ NATIVE_VERSION="${INSTALL_VERSION:+"-${INSTALL_VERSION}.${SYSARCH}"}"
+}
+
+common_dnf_opts() {
+ if [ "${INTERACTIVE}" = "0" ]; then
+ interactive_opts="-y"
+ fi
+ if command -v dnf > /dev/null; then
+ pm_cmd="dnf"
+ repo_subcmd="makecache"
+ else
+ pm_cmd="yum"
+ fi
+ install_subcmd="install"
+ pkg_install_opts="${interactive_opts}"
+ repo_update_opts="${interactive_opts}"
+ uninstall_subcmd="remove"
+}
+
+try_package_install() {
+ failed_refresh_msg="Failed to refresh repository metadata. ${BADNET_MSG} or incompatibilities with one or more third-party package repositories in the system package manager configuration."
+
+ if [ -z "${DISTRO_COMPAT_NAME}" ] || [ "${DISTRO_COMPAT_NAME}" = "unknown" ]; then
+ warning "Unable to determine Linux distribution for native packages."
+ return 2
+ elif [ -z "${SYSCODENAME}" ]; then
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
+ warning "Release codename not set. Unable to check availability of native packages for this system."
+ return 2
+ ;;
+ esac
+ fi
+
+ set_tmpdir
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to install using native packages..."
+ else
+ progress "Attempting to install using native packages..."
+ fi
+
+ if [ "${SELECTED_RELEASE_CHANNEL}" = "nightly" ]; then
+ release="-edge"
+ else
+ release=""
+ fi
+
+ interactive_opts=""
+ env=""
+
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
+ if [ "${INTERACTIVE}" = "0" ]; then
+ install_subcmd="-o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold install"
+ interactive_opts="-y"
+ env="DEBIAN_FRONTEND=noninteractive"
+ else
+ install_subcmd="install"
+ fi
+ needs_early_refresh=1
+ pm_cmd="apt-get"
+ repo_subcmd="update"
+ pkg_type="deb"
+ pkg_vsep="_"
+ pkg_install_opts="${interactive_opts}"
+ repo_update_opts="${interactive_opts}"
+ uninstall_subcmd="purge"
+ repo_prefix="${DISTRO_COMPAT_NAME}/${SYSCODENAME}"
+ pkg_suffix="+${DISTRO_COMPAT_NAME}${SYSVERSION}_all"
+ INSTALL_TYPE="binpkg-deb"
+ NATIVE_VERSION="${INSTALL_VERSION:+"=${INSTALL_VERSION}"}"
+ ;;
+ centos)
+ common_rpm_opts
+ common_dnf_opts
+ repo_prefix="el/${SYSVERSION}"
+ # if [ "${SYSVERSION}" -lt 8 ]; then
+ # explicitly_install_native_plugins=1
+ # fi
+ ;;
+ fedora|ol)
+ common_rpm_opts
+ common_dnf_opts
+ repo_prefix="${DISTRO_COMPAT_NAME}/${SYSVERSION}"
+ ;;
+ opensuse)
+ if [ "${INTERACTIVE}" = "0" ]; then
+ install_subcmd="--non-interactive --no-gpg-checks install"
+ else
+ install_subcmd="--no-gpg-checks install"
+ fi
+ common_rpm_opts
+ pm_cmd="zypper"
+ repo_subcmd="--gpg-auto-import-keys refresh"
+ repo_prefix="opensuse/${SYSVERSION}"
+ pkg_install_opts="${interactive_opts} --allow-unsigned-rpm"
+ repo_update_opts=""
+ uninstall_subcmd="remove"
+ ;;
+ amzn)
+ common_rpm_opts
+ common_dnf_opts
+ repo_prefix="amazonlinux/${SYSVERSION}"
+ ;;
+ *)
+ warning "We do not provide native packages for ${DISTRO}."
+ return 2
+ ;;
+ esac
+
+ if [ -n "${SKIP_DISTRO_DETECTION}" ]; then
+    warning "Attempting to use native packages with a distro override. This is not officially supported, but may work in some cases. If your system requires a distro override to use native packages, please open a feature request at ${AGENT_BUG_REPORT_URL} about it so that we can update the installer to auto-detect this."
+ fi
+
+ if [ -n "${INSTALL_VERSION}" ]; then
+ if echo "${INSTALL_VERSION}" | grep -q "nightly"; then
+ new_release="-edge"
+ else
+ new_release=
+ fi
+
+ if { [ -n "${new_release}" ] && [ -z "${release}" ]; } || { [ -z "${new_release}" ] && [ -n "${release}" ]; }; then
+      warning "The selected release channel does not match the specified version and will be changed automatically."
+ fi
+
+ release="${new_release}"
+ fi
+
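+  # Build the repository-configuration package name and download URL for the detected
+  # package format and release channel.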
+ repoconfig_name="netdata-repo${release}"
+
+ case "${pkg_type}" in
+ deb)
+ repoconfig_file="${repoconfig_name}${pkg_vsep}${REPOCONFIG_DEB_VERSION}${pkg_suffix}.${pkg_type}"
+ repoconfig_url="${REPOCONFIG_DEB_URL_PREFIX}/${repo_prefix}/${repoconfig_file}"
+ ;;
+ rpm)
+ repoconfig_file="${repoconfig_name}${pkg_vsep}${REPOCONFIG_RPM_VERSION}${pkg_suffix}.${pkg_type}"
+ repoconfig_url="${REPOCONFIG_RPM_URL_PREFIX}/${repo_prefix}/${SYSARCH}/${repoconfig_file}"
+ ;;
+ esac
+
+ if ! pkg_installed "${repoconfig_name}"; then
+ progress "Checking for availability of repository configuration package."
+ if ! check_for_remote_file "${repoconfig_url}"; then
+ warning "No repository configuration package available for ${DISTRO} ${SYSVERSION}. Cannot install native packages on this system."
+ return 2
+ fi
+
+ if ! download "${repoconfig_url}" "${tmpdir}/${repoconfig_file}"; then
+ fatal "Failed to download repository configuration package. ${BADNET_MSG}." F0209
+ fi
+
+ if [ -n "${needs_early_refresh}" ]; then
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts}; then
+ warning "${failed_refresh_msg}"
+ return 2
+ fi
+ fi
+
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${install_subcmd} ${pkg_install_opts} "${tmpdir}/${repoconfig_file}"; then
+ warning "Failed to install repository configuration package."
+ return 2
+ fi
+
+ if [ -n "${repo_subcmd}" ]; then
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts}; then
+ fatal "${failed_refresh_msg} In most cases, disabling any third-party repositories on the system and re-running the installer with the same options should work. If that does not work, consider using a static build with the --static-only option instead of native packages." F0205
+ fi
+ fi
+ else
+ progress "Repository configuration is already present, attempting to install netdata."
+ fi
+
+ if [ "${ACTION}" = "repositories-only" ]; then
+    progress "Successfully installed repository configuration package."
+ deferred_warnings
+ cleanup
+ trap - EXIT
+    exit 0
+ fi
+
+ if ! check_special_native_deps; then
+ warning "Could not find secondary dependencies for ${DISTRO} on ${SYSARCH}."
+ if [ -z "${NO_CLEANUP}" ]; then
+ progress "Attempting to uninstall repository configuration package."
+ # shellcheck disable=SC2086
+ run_as_root env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}"
+ fi
+ return 2
+ fi
+
+ if ! netdata_avail_check "${DISTRO_COMPAT_NAME}"; then
+ warning "Could not find a usable native package for ${DISTRO} on ${SYSARCH}."
+ if [ -z "${NO_CLEANUP}" ]; then
+ progress "Attempting to uninstall repository configuration package."
+ # shellcheck disable=SC2086
+ run_as_root env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}"
+ fi
+ return 2
+ fi
+
+ if [ "${NETDATA_DISABLE_TELEMETRY}" -eq 1 ]; then
+ run_as_root mkdir -p "/etc/netdata"
+ run_as_root touch "/etc/netdata/.opt-out-from-anonymous-statistics"
+ fi
+
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${install_subcmd} ${pkg_install_opts} "netdata${NATIVE_VERSION}"; then
+ warning "Failed to install Netdata package."
+ if [ -z "${NO_CLEANUP}" ]; then
+ progress "Attempting to uninstall repository configuration package."
+ # shellcheck disable=SC2086
+ run_as_root env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}"
+ fi
+ return 2
+ fi
+
+ if [ -n "${explicitly_install_native_plugins}" ]; then
+ progress "Installing external plugins."
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} ${install_subcmd} ${DEFAULT_PLUGIN_PACKAGES}; then
+ warning "Failed to install external plugin packages. Some collectors may not be available."
+ fi
+ fi
+}
+
+# ======================================================================
+# Static build install code
+# shellcheck disable=SC2034,SC2086,SC2126
+set_static_archive_urls() {
+ if [ -z "${2}" ]; then
+ arch="${SYSARCH}"
+ else
+ arch="${2}"
+ fi
+
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ path="$(cd "${NETDATA_OFFLINE_INSTALL_SOURCE}" || exit 1; pwd)"
+ export NETDATA_STATIC_ARCHIVE_URL="file://${path}/netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_NAME="netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="file://${path}/sha256sums.txt"
+ elif [ "${1}" = "stable" ]; then
+ if [ -n "${INSTALL_VERSION}" ]; then
+ export NETDATA_STATIC_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/v${INSTALL_VERSION}/netdata-${arch}-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_OLD_URL="https://github.com/netdata/netdata/releases/download/v${INSTALL_VERSION}/netdata-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_NAME="netdata-${arch}-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_OLD_NAME="netdata-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/v${INSTALL_VERSION}/sha256sums.txt"
+ else
+ latest="$(get_redirect "https://github.com/netdata/netdata/releases/latest")"
+ export NETDATA_STATIC_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/${latest}/netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_NAME="netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/${latest}/sha256sums.txt"
+ fi
+ else
+ if [ -n "${INSTALL_VERSION}" ]; then
+ export NETDATA_STATIC_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/download/v${INSTALL_VERSION}/netdata-${arch}-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_OLD_URL="${NETDATA_TARBALL_BASEURL}/download/v${INSTALL_VERSION}/netdata-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_NAME="netdata-${arch}-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_OLD_NAME="netdata-v${INSTALL_VERSION}.gz.run"
+ export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/download/v${INSTALL_VERSION}/sha256sums.txt"
+ else
+ tag="$(get_redirect "${NETDATA_TARBALL_BASEURL}/latest")"
+ export NETDATA_STATIC_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/download/${tag}/netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_NAME="netdata-${arch}-latest.gz.run"
+ export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/download/${tag}/sha256sums.txt"
+ fi
+ fi
+}
+
+try_static_install() {
+ set_static_archive_urls "${SELECTED_RELEASE_CHANNEL}"
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to install using static build..."
+ else
+ progress "Attempting to install using static build..."
+ fi
+
+ # Check status code first, so that we can provide nicer fallback for dry runs.
+ if check_for_remote_file "${NETDATA_STATIC_ARCHIVE_URL}"; then
+ netdata_agent="${NETDATA_STATIC_ARCHIVE_NAME}"
+ elif [ "${SYSARCH}" = "x86_64" ] && check_for_remote_file "${NETDATA_STATIC_ARCHIVE_OLD_URL}"; then
+ netdata_agent="${NETDATA_STATIC_ARCHIVE_OLD_NAME}"
+ export NETDATA_STATIC_ARCHIVE_URL="${NETDATA_STATIC_ARCHIVE_OLD_URL}"
+ else
+ warning "There is no static build available for ${SYSARCH} CPUs. This usually means we simply do not currently provide static builds for ${SYSARCH} CPUs."
+ return 2
+ fi
+
+ if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "${tmpdir}/${netdata_agent}"; then
+ fatal "Unable to download static build archive for ${SYSARCH}. ${BADNET_MSG}." F0208
+ fi
+
+ if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt"; then
+ fatal "Unable to fetch checksums to verify static build archive. ${BADNET_MSG}." F0206
+ fi
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would validate SHA256 checksum of downloaded static build archive."
+ else
+ if [ -z "${INSTALL_VERSION}" ]; then
+ if ! grep "${netdata_agent}" "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Static binary checksum validation failed. ${BADCACHE_MSG}." F0207
+ fi
+ fi
+ fi
+
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ opts="${opts} --accept"
+ fi
+
+ progress "Installing netdata"
+ # shellcheck disable=SC2086
+ if ! run_as_root sh "${tmpdir}/${netdata_agent}" ${opts} -- ${NETDATA_INSTALLER_OPTIONS}; then
+ warning "Failed to install static build of Netdata on ${SYSARCH}."
+ run rm -rf /opt/netdata
+ return 2
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ install_type_file="/opt/netdata/etc/netdata/.install-type"
+ if [ -f "${install_type_file}" ]; then
+ run_as_root sh -c "cat \"${install_type_file}\" > \"${tmpdir}/install-type\""
+ run_as_root chown "$(id -u)":"$(id -g)" "${tmpdir}/install-type"
+ # shellcheck disable=SC1090,SC1091
+ . "${tmpdir}/install-type"
+ cat > "${tmpdir}/install-type" <<- EOF
+ INSTALL_TYPE='kickstart-static'
+ PREBUILT_ARCH='${PREBUILT_ARCH}'
+ EOF
+ run_as_root chown netdata:netdata "${tmpdir}/install-type"
+ run_as_root cp "${tmpdir}/install-type" "${install_type_file}"
+ fi
+ fi
+}
+
+# ======================================================================
+# Local build install code
+
+set_source_archive_urls() {
+ if [ "$1" = "stable" ]; then
+ if [ -n "${INSTALL_VERSION}" ]; then
+ export NETDATA_SOURCE_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/v${INSTALL_VERSION}/netdata-v${INSTALL_VERSION}.tar.gz"
+ export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/v${INSTALL_VERSION}/sha256sums.txt"
+ else
+ latest="$(get_redirect "https://github.com/netdata/netdata/releases/latest")"
+ export NETDATA_SOURCE_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/${latest}/netdata-${latest}.tar.gz"
+ export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/${latest}/sha256sums.txt"
+ fi
+ else
+ if [ -n "${INSTALL_VERSION}" ]; then
+ export NETDATA_SOURCE_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/download/v${INSTALL_VERSION}/netdata-latest.tar.gz"
+ export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/download/v${INSTALL_VERSION}/sha256sums.txt"
+ else
+ tag="$(get_redirect "${NETDATA_TARBALL_BASEURL}/latest")"
+ export NETDATA_SOURCE_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/download/${tag}/netdata-latest.tar.gz"
+ export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/download/${tag}/sha256sums.txt"
+ fi
+ fi
+}
+
+install_local_build_dependencies() {
+ set_tmpdir
+ bash="$(command -v bash 2> /dev/null)"
+
+ if [ -z "${bash}" ] || [ ! -x "${bash}" ]; then
+ warning "Unable to find a usable version of \`bash\` (required for local build)."
+ return 1
+ fi
+
+ if ! download "${PACKAGES_SCRIPT}" "${tmpdir}/install-required-packages.sh"; then
+ fatal "Failed to download dependency handling script for local build. ${BADNET_MSG}." F000D
+ fi
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would run downloaded script to install required build dependencies..."
+ else
+ progress "Running downloaded script to install required build dependencies..."
+ fi
+
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ opts="--dont-wait --non-interactive"
+ fi
+
+ # shellcheck disable=SC2086
+ if ! run_as_root "${bash}" "${tmpdir}/install-required-packages.sh" ${opts} netdata; then
+ warning "Failed to install all required packages, but installation might still be possible."
+ fi
+}
+
+build_and_install() {
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to build netdata..."
+ else
+ progress "Building netdata..."
+ fi
+
+ echo "INSTALL_TYPE='kickstart-build'" > system/.install-type
+
+ opts="${NETDATA_INSTALLER_OPTIONS}"
+
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ opts="${opts} --dont-wait"
+ fi
+
+ if [ "${SELECTED_RELEASE_CHANNEL}" = "stable" ]; then
+ opts="${opts} --stable-channel"
+ fi
+
+ if [ "${NETDATA_REQUIRE_CLOUD}" -eq 1 ]; then
+ opts="${opts} --require-cloud"
+ elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then
+ opts="${opts} --disable-cloud"
+ fi
+
+ # shellcheck disable=SC2086
+ run_script ./netdata-installer.sh ${opts}
+
+ case $? in
+ 1)
+ if [ -n "${EXIT_REASON}" ]; then
+ fatal "netdata-installer.sh failed to run: ${EXIT_REASON}" "${EXIT_CODE}"
+ else
+ fatal "netdata-installer.sh failed to run: Encountered an unhandled error in the installer code." I0000
+ fi
+ ;;
+ 2) fatal "Insufficient RAM to install netdata." F0008 ;;
+ esac
+}
+
+try_build_install() {
+ set_tmpdir
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would attempt to install by building locally..."
+ else
+ progress "Attempting to install by building locally..."
+ fi
+
+ if ! install_local_build_dependencies; then
+ return 1
+ fi
+
+ set_source_archive_urls "${SELECTED_RELEASE_CHANNEL}"
+
+ if [ -n "${INSTALL_VERSION}" ]; then
+ if ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz"; then
+ fatal "Failed to download source tarball for local build. ${BADNET_MSG}." F000B
+ fi
+ elif ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "${tmpdir}/netdata-latest.tar.gz"; then
+ fatal "Failed to download source tarball for local build. ${BADNET_MSG}." F000B
+ fi
+
+ if ! download "${NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt"; then
+ fatal "Failed to download checksums for source tarball verification. ${BADNET_MSG}." F000C
+ fi
+
+ if [ "${DRY_RUN}" -eq 1 ]; then
+ progress "Would validate SHA256 checksum of downloaded source archive."
+ else
+ if [ -z "${INSTALL_VERSION}" ]; then
+ # shellcheck disable=SC2086
+ if ! grep netdata-latest.tar.gz "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Tarball checksum validation failed. ${BADCACHE_MSG}." F0005
+ fi
+ fi
+ fi
+
+ if [ -n "${INSTALL_VERSION}" ]; then
+ run tar -xf "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz" -C "${tmpdir}"
+ rm -rf "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz" > /dev/null 2>&1
+ else
+ run tar -xf "${tmpdir}/netdata-latest.tar.gz" -C "${tmpdir}"
+ rm -rf "${tmpdir}/netdata-latest.tar.gz" > /dev/null 2>&1
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ cd "$(find "${tmpdir}" -mindepth 1 -maxdepth 1 -type d -name netdata-)" || fatal "Cannot change directory to netdata source tree" F0006
+ fi
+
+ if [ -x netdata-installer.sh ] || [ "${DRY_RUN}" -eq 1 ]; then
+ build_and_install || return 1
+ else
+    # This case is needed because some platforms produce an extra directory when extracting the source tarball.
+ if [ "$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
+ cd "$(find . -mindepth 1 -maxdepth 1 -type d)" && build_and_install || return 1
+ else
+ fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh)." F0009
+ fi
+ fi
+}
+
+# ======================================================================
+# Offline install support code
+
+prepare_offline_install_source() {
+ if [ -e "${1}" ]; then
+ if [ ! -d "${1}" ]; then
+ fatal "${1} is not a directory, unable to prepare offline install source." F0503
+ fi
+ else
+ run mkdir -p "${1}" || fatal "Unable to create target directory for offline install preparation." F0504
+ fi
+
+ run cd "${1}" || fatal "Failed to switch to target directory for offline install preparation." F0505
+
+ case "${NETDATA_FORCE_METHOD}" in
+ static|'')
+ set_static_archive_urls "${SELECTED_RELEASE_CHANNEL}" "x86_64"
+
+ if check_for_remote_file "${NETDATA_STATIC_ARCHIVE_URL}"; then
+ for arch in ${STATIC_INSTALL_ARCHES}; do
+ set_static_archive_urls "${SELECTED_RELEASE_CHANNEL}" "${arch}"
+
+ progress "Fetching ${NETDATA_STATIC_ARCHIVE_URL}"
+ if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "netdata-${arch}-latest.gz.run"; then
+ warning "Failed to download static installer archive for ${arch}. ${BADNET_MSG}."
+ fi
+ done
+ legacy=0
+ else
+ warning "Selected version of Netdata only provides static builds for x86_64. You will only be able to install on x86_64 systems with this offline install source."
+ progress "Fetching ${NETDATA_STATIC_ARCHIVE_OLD_URL}"
+ legacy=1
+
+ if ! download "${NETDATA_STATIC_ARCHIVE_OLD_URL}" "netdata-x86_64-latest.gz.run"; then
+ warning "Failed to download static installer archive for x86_64. ${BADNET_MSG}."
+ fi
+ fi
+
+ progress "Fetching ${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}"
+ if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "sha256sums.txt"; then
+ fatal "Failed to download checksum file. ${BADNET_MSG}." F0506
+ fi
+ ;;
+ esac
+
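+  # Legacy x86_64-only releases published the archive as netdata-latest.gz.run, so point the checksum entry at the architecture-specific name we saved it under.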
+ if [ "${legacy:-0}" -eq 1 ]; then
+    sed -e 's/netdata-latest.gz.run/netdata-x86_64-latest.gz.run/' sha256sums.txt > sha256sums.tmp
+ mv sha256sums.tmp sha256sums.txt
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ progress "Verifying checksums."
+ if ! grep -e "$(find . -name '*.gz.run')" sha256sums.txt | safe_sha256sum -c -; then
+ fatal "Checksums for offline install files are incorrect. ${BADCACHE_MSG}." F0507
+ fi
+ else
+ progress "Would verify SHA256 checksums of downloaded installation files."
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ progress "Preparing install script."
+ cat > "install.sh" <<-EOF
+ #!/bin/sh
+ dir=\$(CDPATH= cd -- "\$(dirname -- "\$0")" && pwd)
+ "\${dir}/kickstart.sh" --offline-install-source "\${dir}" \${@}
+ EOF
+ chmod +x "install.sh"
+ else
+ progress "Would create install script"
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ progress "Copying kickstart script."
+ cp "${KICKSTART_SOURCE}" "kickstart.sh"
+ chmod +x "kickstart.sh"
+ else
+ progress "Would copy kickstart.sh to offline install source directory"
+ fi
+
+ if [ "${DRY_RUN}" -ne 1 ]; then
+ progress "Saving release channel information."
+ echo "${SELECTED_RELEASE_CHANNEL}" > "channel"
+ else
+ progress "Would save release channel information to offline install source directory"
+ fi
+
+ progress "Finished preparing offline install source directory at ${1}. You can now copy this directory to a target system and then run the script ‘install.sh’ from it to install on that system."
+}
+
+# ======================================================================
+# Per system-type install logic
+
+install_on_linux() {
+ if [ "${NETDATA_FORCE_METHOD}" != 'static' ] && [ "${NETDATA_FORCE_METHOD}" != 'build' ] && [ -z "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ SELECTED_INSTALL_METHOD="native"
+ try_package_install
+
+ case "$?" in
+ 0)
+ NETDATA_INSTALL_SUCCESSFUL=1
+ INSTALL_PREFIX="/"
+ ;;
+ 1) fatal "Unable to install on this system." F0300 ;;
+ 2)
+ case "${NETDATA_FORCE_METHOD}" in
+ native) fatal "Could not install native binary packages." F0301 ;;
+ *) warning "Could not install native binary packages, falling back to alternative installation method." ;;
+ esac
+ ;;
+ esac
+ fi
+
+ if [ "${NETDATA_FORCE_METHOD}" != 'native' ] && [ "${NETDATA_FORCE_METHOD}" != 'build' ] && [ -z "${NETDATA_INSTALL_SUCCESSFUL}" ]; then
+ SELECTED_INSTALL_METHOD="static"
+ INSTALL_TYPE="kickstart-static"
+ try_static_install
+
+ case "$?" in
+ 0)
+ NETDATA_INSTALL_SUCCESSFUL=1
+ INSTALL_PREFIX="/opt/netdata"
+ ;;
+ 1) fatal "Unable to install on this system." F0302 ;;
+ 2)
+ case "${NETDATA_FORCE_METHOD}" in
+ static) fatal "Could not install static build." F0303 ;;
+ *) warning "Could not install static build, falling back to alternative installation method." ;;
+ esac
+ ;;
+ esac
+ fi
+
+ if [ "${NETDATA_FORCE_METHOD}" != 'native' ] && [ "${NETDATA_FORCE_METHOD}" != 'static' ] && [ -z "${NETDATA_INSTALL_SUCCESSFUL}" ]; then
+ SELECTED_INSTALL_METHOD="build"
+ INSTALL_TYPE="kickstart-build"
+ try_build_install
+
+ case "$?" in
+ 0) NETDATA_INSTALL_SUCCESSFUL=1 ;;
+ *) fatal "Unable to install on this system." F0304 ;;
+ esac
+ fi
+}
+
+install_on_macos() {
+ case "${NETDATA_FORCE_METHOD}" in
+ native) fatal "User requested native package, but native packages are not available for macOS. Try installing without \`--only-native\` option." F0305 ;;
+ static) fatal "User requested static build, but static builds are not available for macOS. Try installing without \`--only-static\` option." F0306 ;;
+ *)
+ SELECTED_INSTALL_METHOD="build"
+ INSTALL_TYPE="kickstart-build"
+ try_build_install
+
+ case "$?" in
+ 0) NETDATA_INSTALL_SUCCESSFUL=1 ;;
+ *) fatal "Unable to install on this system." F0307 ;;
+ esac
+ ;;
+ esac
+}
+
+install_on_freebsd() {
+ case "${NETDATA_FORCE_METHOD}" in
+ native) fatal "User requested native package, but native packages are not available for FreeBSD. Try installing without \`--only-native\` option." F0308 ;;
+ static) fatal "User requested static build, but static builds are not available for FreeBSD. Try installing without \`--only-static\` option." F0309 ;;
+ *)
+ SELECTED_INSTALL_METHOD="build"
+ INSTALL_TYPE="kickstart-build"
+ try_build_install
+
+ case "$?" in
+ 0) NETDATA_INSTALL_SUCCESSFUL=1 ;;
+ *) fatal "Unable to install on this system." F030A ;;
+ esac
+ ;;
+ esac
+}
+
+# ======================================================================
+# Argument parsing code
+
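+# Resolve a requested major version (--install-major-version) to a concrete release in the selected channel, prompting to fall back to the latest version when no matching release exists.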
+handle_major_version() {
+ CONTINUE_INSTALL_PROMPT="Attempting to install will use the latest version available overall. Do you wish to continue the install?"
+
+ if [ -z "${INSTALL_MAJOR_VERSION}" ]; then
+ return
+ fi
+
+ actual_version="$(get_actual_version "v${INSTALL_MAJOR_VERSION}" "${RELEASE_CHANNEL}")"
+
+ if [ -z "${actual_version}" ]; then
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ fatal "Could not determine the lastest releaase in channel '${RELEASE_CHANNEL}' with major version '${INSTALL_MAJOR_VERSION}'" F0517
+ else
+ if confirm "Unable to determine the correct version to install for major version '${INSTALL_MAJOR_VERSION}'. ${CONTINUE_INSTALL_PROMPT}"; then
+ progress "User requested continuing the install with the latest version."
+ else
+ fatal "Cancelling installation at user request." F0518
+ fi
+ fi
+ elif [ "${actual_version}" = 'NONE' ]; then
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ warning "No releases with major version '${INSTALL_MAJOR_VERSION}' have been published. Continuing the install with the latest version instead."
+ else
+ if confirm "No releases with major version '${INSTALL_MAJOR_VERSION}' have been published. ${CONTINUE_INSTALL_PROMPT}"; then
+ progress "User requested continuing the install with the latest version."
+ else
+ fatal "Cancelling installation at user request." F0519
+ fi
+ fi
+ else
+ INSTALL_VERSION="${actual_version}"
+ fi
+}
+
+validate_args() {
+ check_claim_opts
+
+ if [ -n "${NETDATA_FORCE_METHOD}" ]; then
+ SELECTED_INSTALL_METHOD="${NETDATA_FORCE_METHOD}"
+ fi
+
+ if [ "${ACTION}" = "repositories-only" ] && [ "${NETDATA_FORCE_METHOD}" != "native" ]; then
+ fatal "Repositories can only be installed for native installs." F050D
+ fi
+
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ case "${NETDATA_FORCE_METHOD}" in
+ native|build) fatal "Offline installs are only supported for static builds currently." F0502 ;;
+ esac
+ fi
+
+ if [ -n "${LOCAL_BUILD_OPTIONS}" ]; then
+ case "${NETDATA_FORCE_METHOD}" in
+ build) NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} ${LOCAL_BUILD_OPTIONS}" ;;
+ *) fatal "Specifying local build options is only supported when the --build-only option is also specified." F0401 ;;
+ esac
+ fi
+
+ if [ -n "${STATIC_INSTALL_OPTIONS}" ]; then
+ case "${NETDATA_FORCE_METHOD}" in
+ static) NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} ${STATIC_INSTALL_OPTIONS}" ;;
+ *) fatal "Specifying installer options options is only supported when the --static-only option is also specified." F0402 ;;
+ esac
+ fi
+
+ if [ "${RELEASE_CHANNEL}" = "default" ]; then
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ SELECTED_RELEASE_CHANNEL="$(cat "${NETDATA_OFFLINE_INSTALL_SOURCE}/channel")"
+
+ if [ -z "${SELECTED_RELEASE_CHANNEL}" ]; then
+ fatal "Could not find a release channel indicator in ${NETDATA_OFFLINE_INSTALL_SOURCE}." F0508
+ fi
+ else
+ SELECTED_RELEASE_CHANNEL="${DEFAULT_RELEASE_CHANNEL}"
+ fi
+ else
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ] && [ "${RELEASE_CHANNEL}" != "$(cat "${NETDATA_OFFLINE_INSTALL_SOURCE}/channel")" ]; then
+ fatal "Release channal '${RELEASE_CHANNEL}' requested, but indicated offline installation source release channel is '$(cat "${NETDATA_OFFLINE_INSTALL_SOURCE}/channel")'." F0509
+ fi
+
+ SELECTED_RELEASE_CHANNEL="${RELEASE_CHANNEL}"
+ fi
+
+ if [ -n "${INSTALL_MAJOR_VERSION}" ] && [ -n "${INSTALL_VERSION}" ]; then
+ fatal "Only one of --install-version or --install-major-version may be specified." F0515
+ fi
+
+ handle_major_version # Appropriately updates INSTALL_VERSION if INSTALL_MAJOR_VERSION is set.
+
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ] && [ -n "${INSTALL_VERSION}" ]; then
+ fatal "Specifying an install version alongside an offline install source is not supported." F050A
+ fi
+
+ if [ "${NETDATA_AUTO_UPDATES}" = "default" ]; then
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ] || [ -n "${INSTALL_VERSION}" ]; then
+ AUTO_UPDATE=0
+ else
+ AUTO_UPDATE=1
+ fi
+ elif [ "${NETDATA_INSTALL_MAJOR_VERSION}" ]; then
+ warning "Forcibly disabling auto updates as a specific major version was requested."
+ AUTO_UPDATE=0
+ elif [ "${NETDATA_AUTO_UPDATES}" = 1 ]; then
+ AUTO_UPDATE=1
+ else
+ AUTO_UPDATE=0
+ fi
+}
+
+set_action() {
+ new_action="${1}"
+
+ if [ -n "${ACTION}" ]; then
+ warning "Ignoring previously specified '${ACTION}' operation in favor of '${new_action}' specified later on the command line."
+ fi
+
+ ACTION="${new_action}"
+ NETDATA_COMMAND="${new_action}"
+}
+
+parse_args() {
+ while [ -n "${1}" ]; do
+ case "${1}" in
+ "--help")
+ usage
+ cleanup
+ trap - EXIT
+ exit 0
+ ;;
+ "--no-cleanup") NO_CLEANUP=1 ;;
+ "--dont-wait"|"--non-interactive") INTERACTIVE=0 ;;
+ "--interactive") INTERACTIVE=1 ;;
+ "--dry-run") DRY_RUN=1 ;;
+ "--release-channel")
+ RELEASE_CHANNEL="$(echo "${2}" | tr '[:upper:]' '[:lower:]')"
+ case "${RELEASE_CHANNEL}" in
+ nightly|stable|default) shift 1 ;;
+ *)
+ echo "Unrecognized value for --release-channel. Valid release channels are: stable, nightly, default"
+ exit 1
+ ;;
+ esac
+ ;;
+ "--stable-channel") RELEASE_CHANNEL="stable" ;;
+ "--nightly-channel") RELEASE_CHANNEL="nightly" ;;
+ "--reinstall") set_action 'reinstall' ;;
+ "--reinstall-even-if-unsafe") set_action 'unsafe-reinstall' ;;
+ "--reinstall-clean") set_action 'reinstall-clean' ;;
+ "--uninstall") set_action 'uninstall' ;;
+ "--claim-only") set_action 'claim' ;;
+ "--no-updates") NETDATA_AUTO_UPDATES=0 ;;
+ "--auto-update") NETDATA_AUTO_UPDATES="1" ;;
+ "--auto-update-type"|"--auto-update-method")
+ NETDATA_AUTO_UPDATE_TYPE="$(echo "${2}" | tr '[:upper:]' '[:lower:]')"
+ case "${NETDATA_AUTO_UPDATE_TYPE}" in
+ systemd|interval|crontab) shift 1 ;;
+ *)
+ echo "Unrecognized value for --auto-update-type. Valid values are: systemd, interval, crontab"
+ exit 1
+ ;;
+ esac
+ ;;
+ "--disable-cloud")
+ NETDATA_DISABLE_CLOUD=1
+ NETDATA_REQUIRE_CLOUD=0
+ ;;
+ "--require-cloud")
+ NETDATA_DISABLE_CLOUD=0
+ NETDATA_REQUIRE_CLOUD=1
+ ;;
+ "--dont-start-it")
+ NETDATA_NO_START=1
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --dont-start-it"
+ ;;
+ "--disable-telemetry")
+ NETDATA_DISABLE_TELEMETRY="1"
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --disable-telemetry"
+ ;;
+ "--install-prefix")
+ INSTALL_PREFIX="${2}"
+ shift 1
+ ;;
+ "--old-install-prefix")
+ OLD_INSTALL_PREFIX="${2}"
+ shift 1
+ ;;
+ "--install-major-version")
+ INSTALL_MAJOR_VERSION="${2}"
+ shift 1
+ ;;
+ "--install-version")
+ INSTALL_VERSION="${2}"
+ AUTO_UPDATE=0
+ shift 1
+ ;;
+ "--distro-override")
+ if [ -n "${2}" ]; then
+ SKIP_DISTRO_DETECTION=1
+ DISTRO="$(echo "${2}" | cut -f 1 -d ':' | tr '[:upper:]' '[:lower:]')"
+ SYSVERSION="$(echo "${2}" | cut -f 2 -d ':')"
+ SYSCODENAME="$(echo "${2}" | cut -f 3 -d ':' | tr '[:upper:]' '[:lower:]')"
+
+ if [ -z "${SYSVERSION}" ]; then
+ fatal "You must specify a release as well as a distribution name." F0510
+ fi
+
+ shift 1
+ else
+ fatal "A distribution name and release must be specified for the --distro-override option." F050F
+ fi
+ ;;
+ "--repositories-only")
+ set_action 'repositories-only'
+ NETDATA_FORCE_METHOD="native"
+ ;;
+ "--native-only") NETDATA_FORCE_METHOD="native" ;;
+ "--static-only") NETDATA_FORCE_METHOD="static" ;;
+ "--build-only") NETDATA_FORCE_METHOD="build" ;;
+ "--claim-token")
+ NETDATA_CLAIM_TOKEN="${2}"
+ shift 1
+ ;;
+ "--claim-rooms")
+ NETDATA_CLAIM_ROOMS="${2}"
+ shift 1
+ ;;
+ "--claim-url")
+ NETDATA_CLAIM_URL="${2}"
+ shift 1
+ ;;
+ "--claim-"*)
+ optname="$(echo "${1}" | cut -d '-' -f 4-)"
+ case "${optname}" in
+ id|proxy|user|hostname)
+ NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}=${2}"
+ shift 1
+ ;;
+ verbose|insecure|noproxy|noreload|daemon-not-running) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}" ;;
+ *) warning "Ignoring unrecognized claiming option ${optname}" ;;
+ esac
+ ;;
+ "--local-build-options")
+ LOCAL_BUILD_OPTIONS="${LOCAL_BUILD_OPTIONS} ${2}"
+ shift 1
+ ;;
+ "--static-install-options")
+ STATIC_INSTALL_OPTIONS="${STATIC_INSTALL_OPTIONS} ${2}"
+ shift 1
+ ;;
+ "--prepare-offline-install-source")
+ if [ -n "${2}" ]; then
+ set_action 'prepare-offline'
+ OFFLINE_TARGET="${2}"
+ shift 1
+ else
+ fatal "A target directory must be specified with the --prepare-offline-install-source option." F0500
+ fi
+ ;;
+ "--offline-install-source")
+ if [ -d "${2}" ]; then
+ NETDATA_OFFLINE_INSTALL_SOURCE="${2}"
+ shift 1
+ else
+ fatal "A source directory must be specified with the --offline-install-source option." F0501
+ fi
+ ;;
+ "--"|"all"|"--yes"|"-y"|"--force"|"--accept") warning "Option '${1}' is not recognized, ignoring it. ${BADOPT_MSG}" ;;
+ *) fatal "Unrecognized option '${1}'. ${BADOPT_MSG}" F050E ;;
+ esac
+ shift 1
+ done
+
+ validate_args
+}
+
+# ======================================================================
+# Main program
+
+setup_terminal || echo > /dev/null
+
+# shellcheck disable=SC2068
+parse_args $@
+
+confirm_root_support
+get_system_info
+confirm_install_prefix
+
+if [ -z "${ACTION}" ]; then
+ handle_existing_install
+fi
+
+main
diff --git a/packaging/installer/methods/ansible.md b/packaging/installer/methods/ansible.md
new file mode 100644
index 00000000..6ce4e8f0
--- /dev/null
+++ b/packaging/installer/methods/ansible.md
@@ -0,0 +1,156 @@
+<!--
+title: "Deploy Netdata with Ansible"
+description: "Deploy an infrastructure monitoring solution in minutes with the Netdata Agent and Ansible. Use and customize a simple playbook for monitoring as code."
+image: /img/seo/guides/deploy/ansible.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/ansible.md
+sidebar_label: "Ansible"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Deploy Netdata with Ansible
+
+Netdata's [one-line kickstart](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#install-on-linux-with-one-line-installer) is zero-configuration, highly adaptable, and compatible with tons
+of different operating systems and Linux distributions. You can use it on bare metal, VMs, containers, and everything
+in-between.
+
+But what if you're trying to bootstrap an infrastructure monitoring solution as quickly as possible? What if you need to
+deploy Netdata across an entire infrastructure with many nodes? What if you want to make this deployment reliable,
+repeatable, and idempotent? What if you want to write and deploy your infrastructure or cloud monitoring system like
+code?
+
+Enter [Ansible](https://ansible.com), a popular system provisioning, configuration management, and infrastructure as
+code (IaC) tool. Ansible uses **playbooks** to glue many standardized operations together with a simple syntax, then run
+those operations over standard and secure SSH connections. There's no agent to install on the remote system, so all you
+have to worry about is your application and your monitoring software.
+
+Ansible has some competition from the likes of [Puppet](https://puppet.com/) or [Chef](https://www.chef.io/), but
+Ansible's most valuable feature is **idempotency**. From the [Ansible
+glossary](https://docs.ansible.com/ansible/latest/reference_appendices/glossary.html):
+
+> An operation is idempotent if the result of performing it once is exactly the same as the result of performing it
+> repeatedly without any intervening actions.
+
+Idempotency means you can run an Ansible playbook against your nodes any number of times without affecting how they
+operate. When you deploy Netdata with Ansible, you're also deploying _monitoring as code_.
+
+In this guide, we'll walk through the process of using an [Ansible
+playbook](https://github.com/netdata/community/tree/main/configuration-management/ansible-quickstart/) to automatically
+deploy the Netdata Agent to any number of distributed nodes, manage the configuration of each node, and connect them to
+your Netdata Cloud account. You'll go from some unmonitored nodes to an infrastructure monitoring solution in a matter of
+minutes.
+
+## Prerequisites
+
+- A Netdata Cloud account. [Sign in and create one](https://app.netdata.cloud) if you don't have one already.
+- An administration system with [Ansible](https://www.ansible.com/) installed.
+- One or more nodes that your administration system can access via [SSH public
+  keys](https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key) (preferably password-less; see the sketch below).
+
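+If you haven't set up key-based SSH access yet, the sketch below is usually enough. The user name and addresses are placeholders matching the examples used later in this guide; adjust them to your environment.
+
+```bash
+# Generate an SSH key pair on the administration system (skip this if you already have one).
+ssh-keygen
+
+# Copy the public key to each node you plan to manage.
+ssh-copy-id example@203.0.113.0
+ssh-copy-id example@203.0.113.1
+```
+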
+## Download and configure the playbook
+
+First, download the
+[playbook](https://github.com/netdata/community/tree/main/configuration-management/ansible-quickstart/), move it to the
+current directory, and remove the rest of the cloned repository, as it's not required for using the Ansible playbook.
+
+```bash
+git clone https://github.com/netdata/community.git
+mv community/configuration-management/ansible-quickstart .
+rm -rf community
+```
+
+If you don't want to clone the entire repository, use the [gitzip browser extension](https://gitzip.org/) to download just the `ansible-quickstart` directory as a zip file.
+
+Next, `cd` into the Ansible directory.
+
+```bash
+cd ansible-quickstart
+```
+
+### Edit the `hosts` file
+
+The `hosts` file contains a list of IP addresses or hostnames that Ansible will try to run the playbook against. The
+`hosts` file that comes with the repository contains two example IP addresses, which you should replace with the IP
+addresses or hostnames of your nodes.
+
+```conf
+203.0.113.0 hostname=node-01
+203.0.113.1 hostname=node-02
+```
+
+You can also set the `hostname` variable, which appears both on the local Agent dashboard and Netdata Cloud, or you can
+omit the `hostname=` string entirely to use the system's default hostname.
+
+#### Set the login user (optional)
+
+If you SSH into your nodes as a user other than `root`, you need to configure `hosts` according to those user names. Use
+the `ansible_user` variable to set the login user. For example:
+
+```conf
+203.0.113.0 hostname=ansible-01 ansible_user=example
+```
+
+#### Set your SSH key (optional)
+
+If you use an SSH key other than `~/.ssh/id_rsa` for logging into your nodes, you can set that on a per-node basis in
+the `hosts` file with the `ansible_ssh_private_key_file` variable. For example, to log into a Lightsail instance using
+two different SSH keys supplied by AWS.
+
+```conf
+203.0.113.0 hostname=ansible-01 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-west-2.pem
+203.0.113.1 hostname=ansible-02 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-east-1.pem
+```
+
+### Edit the `vars/main.yml` file
+
+In order to connect your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
+charts](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) or perform [Metric
+Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md), you need to set the `claim_token`
+and `claim_rooms` variables.
+
+To find your `claim_token` and `claim_room`, go to Netdata Cloud, then click on your Space's name in the top navigation,
+then click on **Manage your Space**. Click on the **Nodes** tab in the panel that appears, which displays a script with
+`token` and `room` strings.
+
+![Animated GIF of finding the claiming script and the token and room
+strings](https://user-images.githubusercontent.com/1153921/98740235-f4c3ac00-2367-11eb-8ffd-e9ab0f04c463.gif)
+
+Copy those strings into the `claim_token` and `claim_rooms` variables.
+
+```yml
+claim_token: XXXXX
+claim_rooms: XXXXX
+```
+
+Change the `dbengine_multihost_disk_space` if you want to change the metrics retention policy by allocating more or less
+disk space for storing metrics. The default is 2048 MiB, or 2 GiB.
+
+Because we're connecting this node to Netdata Cloud, and will view its dashboards there instead of via the IP address or
+hostname of the node, the playbook disables that local dashboard by setting `web_mode` to `none`. This gives a small
+security boost by not allowing any unwanted access to the local dashboard.
+
+You can read more about this decision, or other ways you might lock down the local dashboard, in our [node security
+doc](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md).
+
+> Curious about why Netdata's dashboard is open by default? Read our [blog
+> post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) on that zero-configuration design decision.
+
+## Run the playbook
+
+Time to run the playbook from your administration system:
+
+```bash
+ansible-playbook -i hosts tasks/main.yml
+```
+
+Ansible first connects to your node(s) via SSH, then [collects
+facts](https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts) about the system.
+This playbook doesn't use these facts, but you could expand it to provision specific types of systems based on the
+makeup of your infrastructure.
+
+Next, Ansible makes changes to each node according to the `tasks` defined in the playbook, and
+[returns](https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#changed) whether each
+task resulted in a change, a failure, or was skipped entirely.
+
+The task to install Netdata will take a few minutes per node, so be patient! Once the playbook reaches the connect to Cloud
+task, your nodes start populating your Space in Netdata Cloud.
diff --git a/packaging/installer/methods/aws.md b/packaging/installer/methods/aws.md
new file mode 100644
index 00000000..c0b92a03
--- /dev/null
+++ b/packaging/installer/methods/aws.md
@@ -0,0 +1,67 @@
+<!--
+title: "Install Netdata on AWS"
+description: "The Netdata Agent runs on all popular cloud providers, but often requires additional steps and configuration for full functionality."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/aws.md
+sidebar_label: "AWS"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on AWS
+
+Netdata is fully compatible with Amazon Web Services (AWS).
+You can install Netdata on cloud instances to monitor the apps/services running there, or use
+multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+
+## Recommended installation method
+
+The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
+we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
+followed by the solution for your provider.
+
+## Post-installation configuration
+
+### Add a firewall rule to access Netdata's dashboard
+
+If you cannot access Netdata's dashboard on your cloud instance via `http://HOST:19999`, and instead get an error page
+from your browser that says, "This site can't be reached" (Chrome) or "Unable to connect" (Firefox), you may need to
+configure your cloud provider's firewall.
+
+Cloud providers often create network-level firewalls that run separately from the instance itself. Both AWS and Google
+Cloud Platform call them Virtual Private Cloud (VPC) networks. These firewalls can apply even if you've disabled
+firewalls on the instance itself. Because you can modify these firewalls only via the cloud provider's web interface,
+it's easy to overlook them when trying to configure and access Netdata's dashboard.
+
+You can often confirm a firewall issue by querying the dashboard while connected to the instance via SSH: `curl
+http://localhost:19999/api/v1/info`. If you see JSON output, Netdata is running properly. If you try the same `curl`
+command from a remote system, and it fails, it's likely that a firewall is blocking your requests.
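+
+For example, assuming a placeholder public address of `203.0.113.10` for your instance, the two checks look like this:
+
+```bash
+# On the instance itself (over SSH): JSON output here means Netdata is up and listening.
+curl http://localhost:19999/api/v1/info
+
+# From your workstation, using your instance's public IP in place of 203.0.113.10:
+# a timeout here, while the local check succeeds, usually points at the cloud firewall.
+curl --max-time 5 http://203.0.113.10:19999/api/v1/info
+```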
+
+Another option is to put Netdata behind a web server, which will proxy requests through standard HTTP/HTTPS ports
+(80/443), which are likely already open on your instance. We have a number of guides available:
+
+- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
+- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
+- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
+- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
+- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+
+Sign in to the [AWS console](https://console.aws.amazon.com/) and navigate to the EC2 dashboard. Click on the **Security
+Groups** link in the navigation, beneath the **Network & Security** heading. Find the Security Group your instance
+belongs to, and either right-click on it or click the **Actions** button above to see a dropdown menu with **Edit
+inbound rules**.
+
+Add a new rule with the following options:
+
+```conf
+Type: Custom TCP
+Protocol: TCP
+Port Range: 19999
+Source: Anywhere
+Description: Netdata
+```
+
+You can also choose **My IP** as the source if you prefer.
+
+Click **Save** to apply your new inbound firewall rule.
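+
+If you prefer the AWS CLI to the console, a rule along these lines should achieve the same result; the security group ID below is a placeholder, and you may want a narrower `--cidr` range than "anywhere":
+
+```bash
+aws ec2 authorize-security-group-ingress \
+  --group-id sg-0123456789abcdef0 \
+  --protocol tcp \
+  --port 19999 \
+  --cidr 0.0.0.0/0
+```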
diff --git a/packaging/installer/methods/azure.md b/packaging/installer/methods/azure.md
new file mode 100644
index 00000000..4c39a00a
--- /dev/null
+++ b/packaging/installer/methods/azure.md
@@ -0,0 +1,68 @@
+<!--
+title: "Install Netdata on Azure"
+description: "The Netdata Agent runs on all popular cloud providers, but often requires additional steps and configuration for full functionality."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/azure.md
+sidebar_label: "Azure"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on Azure
+
+Netdata is fully compatible with Azure.
+You can install Netdata on cloud instances to monitor the apps/services running there, or use
+multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+
+## Recommended installation method
+
+The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
+we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
+followed by the solution for your provider.
+
+## Post-installation configuration
+
+### Add a firewall rule to access Netdata's dashboard
+
+If you cannot access Netdata's dashboard on your cloud instance via `http://HOST:19999`, and instead get an error page
+from your browser that says, "This site can't be reached" (Chrome) or "Unable to connect" (Firefox), you may need to
+configure your cloud provider's firewall.
+
+Cloud providers often create network-level firewalls that run separately from the instance itself. Both AWS and Google
+Cloud Platform call them Virtual Private Cloud (VPC) networks. These firewalls can apply even if you've disabled
+firewalls on the instance itself. Because you can modify these firewalls only via the cloud provider's web interface,
+it's easy to overlook them when trying to configure and access Netdata's dashboard.
+
+You can often confirm a firewall issue by querying the dashboard while connected to the instance via SSH: `curl
+http://localhost:19999/api/v1/info`. If you see JSON output, Netdata is running properly. If you try the same `curl`
+command from a remote system, and it fails, it's likely that a firewall is blocking your requests.
+
+Another option is to put Netdata behind a web server, which will proxy requests through standard HTTP/HTTPS ports
+(80/443), which are likely already open on your instance. We have a number of guides available:
+
+- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
+- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
+- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
+- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
+- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+
+Sign in to the [Azure portal](https://portal.azure.com) and open the virtual machine running Netdata. Click on the
+**Networking** link beneath the **Settings** header, then click on the **Add inbound security rule** button.
+
+Add a new rule with the following options:
+
+```conf
+Source: Any
+Source port ranges: 19999
+Destination: Any
+Destination port ranges: 19999
+Protocol: TCP
+Action: Allow
+Priority: 310
+Name: Netdata
+```
+
+Click **Add** to apply your new inbound security rule.
+
+
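+If you manage the VM with the Azure CLI instead of the portal, `az vm open-port` should create an equivalent rule; the resource group and VM names below are placeholders:
+
+```bash
+az vm open-port \
+  --resource-group my-resource-group \
+  --name my-netdata-vm \
+  --port 19999 \
+  --priority 310
+```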
diff --git a/packaging/installer/methods/freebsd.md b/packaging/installer/methods/freebsd.md
new file mode 100644
index 00000000..21670cdc
--- /dev/null
+++ b/packaging/installer/methods/freebsd.md
@@ -0,0 +1,148 @@
+<!--
+title: "Install Netdata on FreeBSD"
+description: "Install Netdata on FreeBSD to monitor the health and performance of bare metal or VMs with thousands of real-time, per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/freebsd.md
+sidebar_label: "FreeBSD"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on FreeBSD
+
+> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the
+> details of the installation process, such as version numbers for downloadable packages, before proceeding.
+>
+> You can help improve this document by [submitting a
+> PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/freebsd.md) with your recommended
+> improvements or changes. Thank you!
+
+## Install dependencies
+
+This step needs root privileges.
+
+```sh
+pkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake
+```
+
+Please respond in the affirmative for any relevant prompts during the installation process.
+
+## Install Netdata
+
+The simplest method is to use the single-line [kickstart script](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/kickstart).
+
+If you have a Netdata Cloud account, clicking on the **Connect Nodes** button will generate the kickstart command you should use. Use the command from the "Linux" tab; it should look something like this:
+
+```sh
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --claim-token <CLAIM_TOKEN> --claim-url https://app.netdata.cloud
+```
+Please respond in the affirmative for any relevant prompts during the installation process.
+
+Once the installation is completed, you should be able to start monitoring the FreeBSD server using Netdata.
+
+![image](https://user-images.githubusercontent.com/24860547/202489210-3c5a3346-8f53-4b7b-9832-f9383b34d864.png)
+
+Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).
+
+## Manual installation
+
+If you would prefer to manually install Netdata, the following steps can help you do this.
+
+Download Netdata:
+
+```sh
+fetch https://github.com/netdata/netdata/releases/download/v1.36.1/netdata-v1.36.1.tar.gz
+```
+
+> ⚠️ Verify the latest version by either navigating to [Netdata's latest
+> release](https://github.com/netdata/netdata/releases/latest) or using `curl`:
+>
+> ```bash
+> basename $(curl -Ls -o /dev/null -w %{url_effective} https://github.com/netdata/netdata/releases/latest)
+> ```
+
+Unzip the downloaded file:
+
+```sh
+gunzip netdata*.tar.gz && tar xf netdata*.tar && rm -rf netdata*.tar
+```
+
+Install Netdata in `/opt/netdata`. If you want to enable automatic updates, add `--auto-update` or `-u` to install `netdata-updater` in `cron` (**root permission required**):
+
+```sh
+cd netdata-v* && ./netdata-installer.sh --install-prefix /opt && cp /opt/netdata/usr/sbin/netdata-claim.sh /usr/sbin/
+```
+
+You also need to enable the `netdata` service in `/etc/rc.conf`:
+
+```sh
+sysrc netdata_enable="YES"
+```
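+
+With the service enabled, you should then be able to start it and check that it is running using the standard rc tooling:
+
+```sh
+service netdata start
+service netdata status
+```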
+
+Finally, and very importantly, update Netdata using the script provided by the Netdata team (**root permission required**):
+
+```sh
+cd /opt/netdata/usr/libexec/netdata/ && ./netdata-updater.sh
+```
+
+You can now access the Netdata dashboard by navigating to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your system.
+
+![image](https://user-images.githubusercontent.com/2662304/48304090-fd384080-e51b-11e8-80ae-eecb03118dda.png)
+
+Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. To read
+more about the information collected and how to opt-out, check the [anonymous statistics
+page](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md).
+
+## Updating the Agent on FreeBSD
+
+If you have not passed the `--auto-update` or `-u` parameter to the installer to enable automatic updating, repeat the last step to update Netdata whenever a new version becomes available.
+The `netdata-updater.sh` script will update your Agent.
+
+## Optional parameters to alter your installation
+
+The `kickstart.sh` script accepts a number of optional parameters to control how the installation process works:
+
+- `--non-interactive`: Don’t prompt for anything and assume yes whenever possible, overriding any automatic detection of an interactive run.
+- `--interactive`: Act as if running interactively, even if automatic detection indicates a run is non-interactive.
+- `--dont-wait`: Synonym for `--non-interactive`
+- `--dry-run`: Show what the installer would do, but don’t actually do any of it.
+- `--dont-start-it`: Don’t auto-start the daemon after installing. This parameter is not guaranteed to work.
+- `--release-channel`: Specify a particular release channel to install from. Currently supported release channels are:
+ - `nightly`: Installs a nightly build (this is currently the default).
+ - `stable`: Installs a stable release.
+ - `default`: Explicitly request whatever the current default is.
+- `--nightly-channel`: Synonym for `--release-channel nightly`.
+- `--stable-channel`: Synonym for `--release-channel stable`.
+- `--auto-update`: Enable automatic updates (this is the default).
+- `--no-updates`: Disable automatic updates.
+- `--disable-telemetry`: Disable anonymous statistics.
+- `--native-only`: Only install if native binary packages are available.
+- `--static-only`: Only install if a static build is available.
+- `--build-only`: Only install using a local build.
+- `--disable-cloud`: For local builds, don’t build any of the cloud code at all. For native packages and static builds,
+ use runtime configuration to disable cloud support.
+- `--require-cloud`: Only install if Netdata Cloud can be enabled. Overrides `--disable-cloud`.
+- `--install-prefix`: Specify an installation prefix for local builds (by default, we use a sane prefix based on the type of system).
+- `--install-version`: Specify the version of Netdata to install.
+- `--old-install-prefix`: Specify the custom local build's installation prefix that should be removed.
+- `--local-build-options`: Specify additional options to pass to the installer code when building locally. Only valid if `--build-only` is also specified.
+- `--static-install-options`: Specify additional options to pass to the static installer code. Only valid if `--static-only` is also specified.
+
+The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install:
+
+- `--reinstall`: If there is an existing install, reinstall it instead of trying to update it. If there is not an existing install, install netdata normally.
+- `--reinstall-even-if-unsafe`: If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we cannot detect what type of installation it is). If there is not an existing install, install Netdata normally.
+- `--reinstall-clean`: If there is an existing install, uninstall it before trying to install Netdata. Fails if there is no existing install.
+- `--uninstall`: Uninstall an existing installation of Netdata. Fails if there is no existing install.
+- `--claim-only`: If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally.
+- `--repositories-only`: Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets `--native-only`.
+- `--prepare-offline-install-source`: Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md) for more info.
+
+Additionally, the following environment variables may be used to further customize how the script runs (most users
+should not need to use special values for any of these):
+
+- `TMPDIR`: Used to specify where to put temporary files. On most systems, the default we select automatically
+ should be fine. The user running the script needs to both be able to write files to the temporary directory,
+ and run files from that location.
+- `ROOTCMD`: Used to specify a command to use to run another command with root privileges if needed. By default
+ we try to use sudo, doas, or pkexec (in that order of preference), but if you need special options for one of
+ those to work, or have a different tool to do the same thing on your system, you can specify it here.
+- `DISABLE_TELEMETRY`: If set to a value other than 0, behave as if `--disable-telemetry` was specified.
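+
+As a hypothetical example combining several of the parameters above, the following installs a stable release non-interactively, with telemetry and automatic updates disabled:
+
+```sh
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && \
+  sh /tmp/netdata-kickstart.sh --stable-channel --non-interactive --disable-telemetry --no-updates
+```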
diff --git a/packaging/installer/methods/gcp.md b/packaging/installer/methods/gcp.md
new file mode 100644
index 00000000..0b16b109
--- /dev/null
+++ b/packaging/installer/methods/gcp.md
@@ -0,0 +1,70 @@
+<!--
+title: "Install Netdata on GCP"
+description: "The Netdata Agent runs on all popular cloud providers, but often requires additional steps and configuration for full functionality."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/gcp.md
+sidebar_label: "GCP"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on GCP
+
+Netdata is fully compatible with the Google Cloud Platform (GCP).
+You can install Netdata on cloud instances to monitor the apps/services running there, or use
+multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+
+## Recommended installation method
+
+The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
+we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
+followed by the solution for your provider.
+
+## Post-installation configuration
+
+### Add a firewall rule to access Netdata's dashboard
+
+If you cannot access Netdata's dashboard on your cloud instance via `http://HOST:19999`, and instead get an error page
+from your browser that says, "This site can't be reached" (Chrome) or "Unable to connect" (Firefox), you may need to
+configure your cloud provider's firewall.
+
+Cloud providers often create network-level firewalls that run separately from the instance itself. Both AWS and Google
+Cloud Platform call them Virtual Private Cloud (VPC) networks. These firewalls can apply even if you've disabled
+firewalls on the instance itself. Because you can modify these firewalls only via the cloud provider's web interface,
+it's easy to overlook them when trying to configure and access Netdata's dashboard.
+
+You can often confirm a firewall issue by querying the dashboard while connected to the instance via SSH: `curl
+http://localhost:19999/api/v1/info`. If you see JSON output, Netdata is running properly. If you try the same `curl`
+command from a remote system, and it fails, it's likely that a firewall is blocking your requests.
+
+Another option is to put Netdata behind a web server, which will proxy requests through standard HTTP/HTTPS ports
+(80/443), which are likely already open on your instance. We have a number of guides available:
+
+- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
+- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
+- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
+- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
+- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+
+
+To add a firewall rule, go to the [Firewall rules page](https://console.cloud.google.com/networking/firewalls/list) and
+click **Create firewall rule**.
+
+The following configuration has previously worked for Netdata running on GCP instances
+([see #7786](https://github.com/netdata/netdata/issues/7786)):
+
+```conf
+Name: <name>
+Type: Ingress
+Targets: <name-tag>
+Filters: 0.0.0.0/0
+Protocols/ports: 19999
+Action: allow
+Priority: 1000
+```
+
+Read GCP's [firewall documentation](https://cloud.google.com/vpc/docs/using-firewalls) for specific instructions on how
+to create a new firewall rule.
+
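+If you manage your project from the command line, a `gcloud` rule along these lines should be equivalent to the console configuration above; the rule name and target tag are placeholders:
+
+```bash
+gcloud compute firewall-rules create netdata-dashboard \
+  --direction=INGRESS \
+  --action=ALLOW \
+  --rules=tcp:19999 \
+  --source-ranges=0.0.0.0/0 \
+  --target-tags=netdata \
+  --priority=1000
+```
+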
diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md
new file mode 100644
index 00000000..b21f4dde
--- /dev/null
+++ b/packaging/installer/methods/kickstart.md
@@ -0,0 +1,237 @@
+<!--
+title: "Install Netdata with kickstart.sh"
+description: "The kickstart.sh script installs Netdata from source, including all dependencies required to connect to Netdata Cloud, with a single command."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/kickstart.md"
+sidebar_label: "One line installer (kickstart.sh)"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 10
+-->
+
+import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
+import { Install, InstallBox } from '@site/src/components/Install/'
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Install Netdata with kickstart.sh
+
+![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=kickstart%20downloads&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=kickstart%20downloads&precision=0)
+
+`kickstart.sh` is the recommended way of installing Netdata.
+
+This script works on all Linux distributions and macOS environments by detecting the optimal method of installing Netdata directly on the operating system. It never installs a Docker image of Netdata; to run Netdata in a container, see [Installing with Docker](https://learn.netdata.cloud/docs/installing/docker).
+
+If you are installing on macOS, make sure to check the [install documentation for macOS](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md) before continuing.
+
+
+## Verify script integrity
+
+To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command in the
+[Installation](#installation) section below, run the following:
+
+```bash
+[ "<checksum-will-be-added-in-documentation-processing>" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+
+If the script is valid, this command will return `OK, VALID`.
+
+
+## Installation
+
+> :bulb: Tip
+>
+> If you are unsure whether you want nightly or stable releases, read the [installation guide](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#nightly-vs-stable-releases).
+
+To install Netdata, run the following as your normal user:
+
+<Tabs>
+ <TabItem value="wget" label="wget">
+
+ <OneLineInstallWget/>
+
+ </TabItem>
+ <TabItem value="curl" label="curl">
+
+ <OneLineInstallCurl/>
+
+ </TabItem>
+</Tabs>
+
+> :bookmark_tabs: Note
+>
+> If you plan to also connect the node to Netdata Cloud, make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space,
+> and `YOUR_ROOM_ID` with the ID of the room you are willing to connect the node to.
+
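+For reference, a fully parameterized invocation might look like the following sketch (`YOUR_CLAIM_TOKEN` and `YOUR_ROOM_ID` are placeholders for your own values):
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh \
+  && sh /tmp/netdata-kickstart.sh --stable-channel --claim-token YOUR_CLAIM_TOKEN --claim-rooms YOUR_ROOM_ID
+```
+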
+
+## What does `kickstart.sh` do?
+
+The `kickstart.sh` script does the following after being downloaded and run using `sh`:
+
+- Determines what platform you are running on.
+- Checks for an existing installation, and if found updates that instead of creating a new install.
+- Attempts to install Netdata using our [official native binary packages](#native-packages).
+- If there are no official native binary packages for your system (or installing that way failed), tries to install
+ using a [static build of Netdata](#static-builds) if one is available.
+- If no static build is available, installs required dependencies and then attempts to install by
+ [building Netdata locally](#local-builds) (by downloading the sources and building them directly).
+- Installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated with new nightly
+ versions, unless you override that with an [optional parameter](#optional-parameters-to-alter-your-installation).
+- Prints a message indicating whether the installation succeeded or failed, for QA purposes.
+
+## Optional parameters to alter your installation
+
+The `kickstart.sh` script accepts a number of optional parameters to control how the installation process works:
+
+### destination directory
+
+- `--install-prefix`
+ Specify an installation prefix for local builds (by default, we use a sane prefix based on the type of system).
+- `--old-install-prefix`
+ Specify the custom local build's installation prefix that should be removed.
+
+### interactivity
+
+The script automatically detects whether it is running interactively on a user's terminal or headless in a CI/CD environment. The following options override this detection.
+
+- `--non-interactive` or `--dont-wait`
+  Don’t prompt for anything and assume yes whenever possible, overriding any automatic detection of an interactive run. Use this option when installing the Netdata Agent with a provisioning tool or in CI/CD.
+- `--interactive`
+ Act as if running interactively, even if automatic detection indicates a run is non-interactive.
+
+### release channel
+
+By default, the script installs the nightly channel of Netdata, providing you with the most recent version of Netdata. For production systems where stability is more important than new features, we recommend using the stable channel.
+
+- `--release-channel`
+ Specify a particular release channel to install from. Currently supported release channels are:
+ - `nightly`: Installs a nightly build (this is currently the default).
+ - `stable`: Installs a stable release.
+ - `default`: Explicitly request whatever the current default is.
+- `--nightly-channel`
+ Synonym for `--release-channel nightly`.
+- `--stable-channel`
+ Synonym for `--release-channel stable`.
+- `--install-version`
+ Specify the exact version of Netdata to install.
+
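+For example, to explicitly install from the stable channel (a sketch; the script is downloaded the same way as in the [Installation](#installation) section):
+
+```bash
+sh /tmp/netdata-kickstart.sh --release-channel stable
+```
+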
+### install type
+
+By default, the script prefers native builds when they are available, then static builds. It falls back to building from source when neither is available.
+
+- `--native-only`
+ Only install if native binary packages are available. It fails otherwise.
+- `--static-only`
+ Only install if a static build is available. It fails otherwise.
+ When installing a static build, the parameter `--static-install-options` can provide additional options to pass to the static installer code.
+- `--build-only`
+ Only install using a local build. It fails otherwise.
+ When it builds from source, the parameter `--local-build-options` can be used to give additional build options.
+
+### automatic updates
+
+By default the script installs a cron job to automatically update Netdata to the latest version of the release channel used.
+
+- `--auto-update`
+ Enable automatic updates (this is the default).
+- `--no-updates`
+ Disable automatic updates (not recommended).
+
+### Netdata Cloud related options
+
+By default, the kickstart script provides a Netdata Agent installation that can communicate with Netdata Cloud, provided the Agent is further configured to do so.
+
+- `--claim-token`
+  Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node after the install. This enables, connects, and claims the Netdata Agent to Netdata Cloud.
+- `--claim-url`
+ Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`. Use this option to change the Netdata Cloud URL to point to your Netdata Cloud installation.
+- `--claim-rooms`
+ Specify a comma-separated list of tokens for each War Room this node should appear in.
+- `--claim-proxy`
+ Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy) for details.
+- `--claim-only`
+ If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally.
+- `--require-cloud`
+ Only install if Netdata Cloud can be enabled.
+- `--disable-cloud`
+ For local builds, don’t build any of the Netdata Cloud code at all. For native packages and static builds, use runtime configuration to disable Netdata Cloud support.
+
+### anonymous telemetry
+
+By default, the Agent sends anonymous telemetry data to help us identify the most common operating systems and the configurations Netdata Agents run on. We use this information to prioritize our efforts towards what is most commonly used by our community.
+
+- `--disable-telemetry`
+ Disable anonymous statistics.
+
+### reinstalling
+
+- `--reinstall`
+  If there is an existing install, reinstall it instead of trying to update it. If there is not an existing install, install Netdata normally.
+- `--reinstall-even-if-unsafe`
+ If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we cannot detect what type of installation it is). If there is not an existing install, install Netdata normally.
+- `--reinstall-clean`
+ If there is an existing install, uninstall it before trying to install Netdata. Fails if there is no existing install.
+
+### uninstall
+
+- `--uninstall`
+ Uninstall an existing installation of Netdata. Fails if there is no existing install.
+
+### other options
+
+- `--dry-run`
+ Show what the installer would do, but don’t actually do any of it.
+- `--dont-start-it`
+ Don’t auto-start the daemon after installing. This parameter is not guaranteed to work.
+- `--override-distro`
+ Override the distro detection logic and assume the system is using a specific Linux distribution and release. Takes a single argument consisting of the values of the `ID`, `VERSION_ID`, and `VERSION_CODENAME` fields from `/etc/os-release` for the desired distribution.
+
+The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install:
+
+- `--repositories-only`
+  Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets `--native-only`.
+- `--prepare-offline-install-source`
+  Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md) for more info.
+
+### environment variables
+
+Additionally, the following environment variables may be used to further customize how the script runs (most users
+should not need to use special values for any of these):
+
+- `TMPDIR`: Used to specify where to put temporary files. On most systems, the default we select automatically
+ should be fine. The user running the script needs to both be able to write files to the temporary directory,
+ and run files from that location.
+- `ROOTCMD`: Used to specify a command to use to run another command with root privileges if needed. By default
+  we try to use `sudo`, `doas`, or `pkexec` (in that order of preference), but if you need special options for one of
+  those to work, or have a different tool to do the same thing on your system, you can specify it here.
+- `DISABLE_TELEMETRY`: If set to a value other than 0, behave as if `--disable-telemetry` was specified.
+
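+For example, the following sketch runs the installer non-interactively, uses `doas` for privilege escalation, and disables telemetry via the environment:
+
+```bash
+curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh
+ROOTCMD="doas" DISABLE_TELEMETRY=1 sh /tmp/netdata-kickstart.sh --non-interactive
+```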
+
+## Native packages
+
+We publish official DEB/RPM packages for a number of common Linux distributions as part of our releases and nightly
+builds. These packages are available for 64-bit x86 systems. Depending on the distribution and release they may
+also be available for 32-bit x86, ARMv7, and AArch64 systems. If a native package is available, it will be used as the
+default installation method. This allows you to handle Netdata updates as part of your usual system update procedure.
+
+If you want to enforce the usage of native packages and have the installer return a failure if they are not available,
+you can do so by adding `--native-only` to the options you pass to the installer.
+
+## Static builds
+
+We publish pre-built static builds of Netdata for Linux systems. Currently, these are published for 64-bit x86, ARMv7,
+AArch64, and POWER8+ hardware. These static builds are able to operate in a mostly self-contained manner and only
+require a POSIX compliant shell and a supported init system. These static builds install under `/opt/netdata`. If
+you are on a platform which we provide static builds for but do not provide native packages for, a static build
+will be used by default for installation.
+
+If you want to enforce the usage of a static build and have the installer return a failure if one is not available,
+you can do so by adding `--static-only` to the options you pass to the installer.
+
+## Local builds
+
+For systems which do not have available native packages or static builds, we support building Netdata locally on
+the system it will be installed on. When using this approach, the installer will attempt to install any required
+dependencies for building Netdata, though this may not always work correctly.
+
+If you want to enforce the usage of a local build (perhaps because you require a custom installation prefix,
+which is not supported with native packages or static builds), you can do so by adding `--build-only` to the
+options you pass to the installer.
diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md
new file mode 100644
index 00000000..17cb9f5e
--- /dev/null
+++ b/packaging/installer/methods/kubernetes.md
@@ -0,0 +1,200 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Install Netdata on Kubernetes
+
+This document details how to install Netdata on an existing Kubernetes (k8s) cluster, and connect it to Netdata Cloud. Read our [Kubernetes visualizations](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) documentation, to see what you will get.
+
+The [Netdata Helm chart](https://github.com/netdata/helmchart/blob/master/charts/netdata/README.md) installs one `parent` pod for storing metrics and managing alert notifications, plus an additional
+`child` pod for every node in the cluster, responsible for collecting metrics from the node, Kubernetes control planes,
+pods/containers, and [supported application-specific
+metrics](https://github.com/netdata/helmchart#service-discovery-and-supported-services).
+
+## Prerequisites
+
+To deploy Kubernetes monitoring with Netdata, you need:
+
+- A working cluster running Kubernetes v1.9 or newer.
+- The [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version
+ difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an
+ administrative system.
+- The [Helm package manager](https://helm.sh/) v3.0.0 or newer on the same administrative system.
+- A Netdata Cloud account with a Space to connect the cluster to.
+
+## Deploy Netdata on your Kubernetes Cluster
+
+First, you need to add the Netdata Helm repository, and then install Netdata.
+The installation process securely connects your Kubernetes cluster to stream metrics data to Netdata Cloud, enabling Kubernetes-specific visualizations like the health map and time-series composite charts.
+
+<Tabs groupId="installation_type">
+<TabItem value="new_installations" label="New Installations">
+
+<h3> Install Netdata via the <code>helm install</code> command </h3>
+
+#### Steps
+
+1. Add the Netdata Helm chart repository by running:
+
+ ```bash
+ helm repo add netdata https://netdata.github.io/helmchart/
+ ```
+
+2. To install Netdata using the `helm install` command, run:
+
+ ```bash
+ helm install netdata netdata/netdata
+ ```
+
+ > ### Note
+ >
+ > If you plan to connect the node to Netdata Cloud, you can find the command with the right parameters by clicking the "Add Nodes" button in your Space's Nodes tab.
+
+ For more installation options, please read our [Netdata Helm chart for Kubernetes](https://github.com/netdata/helmchart/blob/master/charts/netdata/README.md) reference.
+
+#### Expected Result
+
+Run `kubectl get services` and `kubectl get pods` to confirm that your cluster now runs a `netdata` service, one parent pod, and multiple child pods.
+
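+For example (a sketch; the exact pod names depend on your release name and cluster size):
+
+```bash
+kubectl get services | grep netdata
+kubectl get pods | grep netdata
+```
+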
+</TabItem>
+<TabItem value="existing_installations" label="Existing Installations">
+
+<h3> Connect an existing Netdata installation to Netdata Cloud </h3>
+
+To connect an existing installation to Netdata Cloud, you need to override the configuration values by running the `helm upgrade` command and providing a file with the values to override.
+
+#### Steps
+
+1. Start by creating a file called `override.yml`:
+
+ ```bash
+ touch override.yml
+ ```
+
+2. Paste the following into your `override.yml` file.
+
+ ```yaml
+ parent:
+ claiming:
+ enabled: true
+ token: YOUR_CLAIM_TOKEN
+ rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B
+
+ child:
+ claiming:
+ enabled: true
+ token: YOUR_CLAIM_TOKEN
+ rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B
+ configs:
+ netdata:
+ data: |
+ [global]
+ memory mode = ram
+ history = 3600
+ [health]
+ enabled = no
+ ```
+
+ > :bookmark_tabs: Note
+ >
+ > Make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space,
+ > and `YOUR_ROOM_ID` with the ID of the room you are willing to connect to.
+
+ These settings connect your `parent`/`child` nodes to Netdata Cloud and store more metrics in the nodes' time-series databases.
+
+ > :bookmark_tabs: Info
+ >
+ > These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`history = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `history` setting.
+
+3. To apply these new settings, run:
+
+ ```bash
+ helm upgrade -f override.yml netdata netdata/netdata
+ ```
+
+#### Expected Result
+
+The cluster terminates the old pods and creates new ones with the proper persistence and connection configuration. You'll see your nodes, containers, and pods appear in Netdata Cloud in a few seconds.
+
+</TabItem>
+</Tabs>
+
+![Netdata's Kubernetes monitoring
+visualizations](https://user-images.githubusercontent.com/1153921/107801491-5dcb0f00-6d1d-11eb-9ab1-876c39f556e2.png)
+
+If you don't need to configure your Netdata deployment, [skip down](#whats-next) to see how Kubernetes monitoring works
+in Netdata, in addition to more guides and resources.
+
+## Configure your Netdata monitoring deployment
+
+Read up on the various configuration options in the [Helm chart
+documentation](https://github.com/netdata/helmchart#configuration) if you need to tweak your Kubernetes monitoring.
+
+Your first option is to create an `override.yml` file (if you haven't already created one while [deploying](#deploy-netdata-on-your-kubernetes-cluster)), then apply the new configuration to your cluster with `helm upgrade`:
+
+```bash
+helm upgrade -f override.yml netdata netdata/netdata
+```
+
+If you want to change only a single setting, use the `--set` argument with `helm upgrade`. For example, to change the
+size of the persistent metrics volume on the parent node:
+
+```bash
+helm upgrade --set parent.database.volumesize=4Gi netdata netdata/netdata
+```
+
+### Configure service discovery
+
+Netdata's [service discovery](https://github.com/netdata/agent-service-discovery/#service-discovery), installed as part
+of the Helm chart installation, finds what services are running in a cluster's containers and automatically collects
+service-level metrics from them.
+
+Service discovery supports [popular applications](https://github.com/netdata/helmchart#applications) and [Prometheus endpoints](https://github.com/netdata/helmchart#prometheus-endpoints).
+
+If your cluster runs services on non-default ports or uses non-default names, you may need to configure service
+discovery to start collecting metrics from your services. You have to edit the default ConfigMap that is shipped with
+the Helm chart and deploy that to your cluster.
+
+First, copy the default file to your administrative system.
+
+```bash
+curl https://raw.githubusercontent.com/netdata/helmchart/master/charts/netdata/sdconfig/child.yml -o child.yml
+```
+
+Edit the new `child.yml` file according to your needs. See the [Helm chart configuration](https://github.com/netdata/helmchart#configuration) and the file itself for details.
+
+You can then run `helm upgrade` with the `--set-file` argument to use your configured `child.yml` file instead of the
+default, changing the path if you copied it elsewhere.
+
+```bash
+helm upgrade --set-file sd.child.configmap.from.value=./child.yml netdata netdata/netdata
+```
+
+Now that you have pushed an edited ConfigMap to your cluster, service discovery should find your non-default services and set up
+metrics collection for them.
+
+## Update/reinstall the Netdata Helm chart
+
+If you update the Helm chart's configuration, run `helm upgrade` to redeploy your Netdata service, replacing `netdata`
+with the name of the release, if you changed it upon installation:
+
+```bash
+helm upgrade netdata netdata/netdata
+```
+
+To update Netdata's Helm chart to the latest version, run `helm repo update`, then redeploy with `helm upgrade`:
+
+```bash
+helm repo update
+helm upgrade netdata netdata/netdata
+```
+
+## What's next?
+
+[Start Kubernetes monitoring](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) in Netdata Cloud, which comes with meaningful visualizations out of the box.
+
+### Related reference documentation
+
+- [Netdata Cloud · Kubernetes monitoring](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md)
+- [Netdata Helm chart](https://github.com/netdata/helmchart)
+- [Netdata service discovery](https://github.com/netdata/agent-service-discovery/)
diff --git a/packaging/installer/methods/macos.md b/packaging/installer/methods/macos.md
new file mode 100644
index 00000000..480a4128
--- /dev/null
+++ b/packaging/installer/methods/macos.md
@@ -0,0 +1,118 @@
+<!--
+title: "Install Netdata on macOS"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/macos.md"
+sidebar_label: "macOS"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on macOS
+
+Netdata works on macOS, albeit with some limitations.
+The number of charts displaying system metrics is limited, but you can use any of Netdata's [external plugins](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md) to monitor any services you might have installed on your macOS system.
+You could also use a macOS system as the parent node in a [streaming configuration](https://github.com/netdata/netdata/blob/master/streaming/README.md).
+
+You can install Netdata in one of the three following ways:
+
+- **[Install Netdata with our automatic one-line installation script (recommended)](#install-netdata-with-our-automatic-one-line-installation-script)**,
+- [Install Netdata via Homebrew](#install-netdata-via-homebrew)
+- [Install Netdata from source](#install-netdata-from-source)
+
+Each of these installation options requires [Homebrew](https://brew.sh/) for handling dependencies.
+
+> The Netdata Homebrew package is community-created and -maintained.
+> Community-maintained packages _may_ receive support from Netdata, but are only a best-effort affair. Learn more about [Netdata's platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+
+## Install Netdata with our automatic one-line installation script
+
+**Local Netdata Agent installation**
+To install Netdata using our automatic [kickstart](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#automatic-one-line-installation-script) script, open a new terminal and run:
+
+```bash
+curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
+```
+The Netdata Agent is installed under `/usr/local/netdata`. Dependencies are handled via Homebrew.
+
+**Automatically connect to Netdata Cloud during installation**
+
+The `kickstart.sh` script accepts additional parameters to automatically [connect](https://github.com/netdata/netdata/blob/master/claim/README.md) your node to Netdata
+Cloud immediately after installation. Find the `token` and `rooms` strings by [signing in to Netdata
+Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Connect Nodes** in the [Spaces management
+area](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/organize-your-infrastrucutre-invite-your-team.md#netdata-cloud-spaces).
+
+- `--claim-token`: Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node
+ after the install.
+- `--claim-rooms`: Specify a comma-separated list of tokens for each War Room this node should appear in.
+- `--claim-proxy`: Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy.
+ See [connecting through a proxy](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy) for details.
+- `--claim-url`: Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`.
+
+For example:
+```bash
+curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --install-prefix /usr/local/ --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
+```
+The Netdata Agent is installed under `/usr/local/netdata` on your machine. Your machine will also show up as a node in your Netdata Cloud.
+
+If you experience issues while claiming your node, follow the steps in our [Troubleshooting](https://github.com/netdata/netdata/blob/master/claim/README.md#troubleshooting) documentation.
+
+## Install Netdata via Homebrew
+
+### For macOS Intel
+
+To install Netdata and all its dependencies, run Homebrew using the following command:
+
+```sh
+brew install netdata
+```
+Homebrew will place your Netdata configuration directory at `/usr/local/etc/netdata/`.
+
+Use the `edit-config` script and the files in this directory to configure Netdata. For reference, you can find stock configuration files at `/usr/local/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`.
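+
+For example, a typical configuration edit might look like the following sketch (assuming the Homebrew paths above):
+
+```bash
+cd /usr/local/etc/netdata
+sudo ./edit-config netdata.conf
+```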
+
+### For Apple Silicon
+
+To install Netdata and all its dependencies, run Homebrew using the following command:
+
+```sh
+brew install netdata
+```
+
+Homebrew will place your Netdata configuration directory at `/opt/homebrew/etc/netdata/`.
+
+Use the `edit-config` script and the files in this directory to configure Netdata. For reference, you can find stock configuration files at `/opt/homebrew/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`.
+
+
+
+Skip on ahead to the [What's next?](#whats-next) section to find links to helpful post-installation guides.
+
+## Install Netdata from source
+
+We don't recommend installing Netdata from source on macOS, as it can be difficult to configure and install dependencies manually.
+
+1. Open your terminal of choice and install the Xcode development packages:
+
+ ```bash
+ xcode-select --install
+ ```
+
+2. Click **Install** on the Software Update popup window that appears.
+3. Use the same terminal session to install some of Netdata's prerequisites using Homebrew. If you don't want to use [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you can omit `cmake`.
+
+ ```bash
+ brew install ossp-uuid autoconf automake pkg-config libuv lz4 json-c openssl libtool cmake
+ ```
+
+4. Download Netdata from our GitHub repository:
+
+ ```bash
+ git clone https://github.com/netdata/netdata.git --recursive
+ ```
+
+5. `cd` into the newly-created directory and then start the installer script:
+
+ ```bash
+ cd netdata/
+ sudo ./netdata-installer.sh --install-prefix /usr/local
+ ```
+
+> Your Netdata configuration directory will be at `/usr/local/netdata/`.
+> Your stock configuration directory will be at `/usr/local/lib/netdata/conf.d/`.
+> The installer will also install a startup plist to start Netdata when your macOS system boots.
diff --git a/packaging/installer/methods/manual.md b/packaging/installer/methods/manual.md
new file mode 100644
index 00000000..269b67c1
--- /dev/null
+++ b/packaging/installer/methods/manual.md
@@ -0,0 +1,248 @@
+<!--
+title: "Install Netdata on Linux from a Git checkout"
+description: "Use the Netdata Agent source code from GitHub, plus helper scripts to set up your system, to install Netdata without packages or binaries."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/manual.md"
+sidebar_label: "From a Git checkout"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 30
+-->
+
+# Install Netdata on Linux from a Git checkout
+
+To install the latest git version of Netdata, please follow these 2 steps:
+
+1. [Prepare your system](#prepare-your-system)
+
+ Install the required packages on your system.
+
+2. [Install Netdata](#install-netdata)
+
+ Download and install Netdata. You can also update it the same way.
+
+## Prepare your system
+
+Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
+Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo).
+
+Use our automatic requirements installer (_no need to be `root`_), which attempts to find the packages that
+should be installed on your system to build and run Netdata. It supports a large variety of major Linux distributions
+and other operating systems and is regularly tested. You can find this tool [here](https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh) or run it directly with `bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh)`. Otherwise, read on for how to get the required packages manually:
+
+- **Alpine** Linux and its derivatives
+ - You have to install `bash` yourself, before using the installer.
+
+- **Gentoo** Linux and its derivatives
+
+- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
+
+- **Red Hat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**)
+ - Please note that for RHEL/CentOS you need
+ [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
+    In addition, RHEL/CentOS version 6 also needs
+    [OKay](https://okay.com.mx/blog-news/rpm-repositories-for-centos-6-and-7.html) for package libuv version 1.
+ - CentOS 8 / RHEL 8 requires a bit of extra work. See the dedicated section below.
+
+- **SUSE** Linux and its derivatives (including **openSUSE**)
+
+- **SLE12** Must have your system registered with SUSE Customer Center or have the DVD. See
+ [#1162](https://github.com/netdata/netdata/issues/1162)
+
+Install the packages required for a **basic Netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `named`, hardware sensors and `SNMP`):
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh' >/tmp/install-required-packages.sh && bash /tmp/install-required-packages.sh -i netdata
+```
+
+Install all the required packages for **monitoring everything Netdata can monitor**:
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh' >/tmp/install-required-packages.sh && bash /tmp/install-required-packages.sh -i netdata-all
+```
+
+If the above commands do not work for you, please [open a GitHub
+issue](https://github.com/netdata/netdata/issues/new?title=packages%20installer%20failed&labels=installation%20help&body=The%20experimental%20packages%20installer%20failed.%0A%0AThis%20is%20what%20it%20says:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20screen%20here%0A%0A%60%60%60)
+with a copy of the message you get on screen. We are trying to make it work everywhere (this is also why the script
+[reports back](https://github.com/netdata/netdata/issues/2054) success or failure for all its runs).
+
+---
+
+This is how to do it by hand:
+
+```sh
+# Debian / Ubuntu
+apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libssl-dev libelf-dev libmnl-dev libprotobuf-dev protobuf-compiler gcc g++ make git autoconf autoconf-archive autogen automake pkg-config curl python cmake
+
+# Fedora
+dnf install zlib-devel libuuid-devel libuv-devel lz4-devel openssl-devel elfutils-libelf-devel libmnl-devel protobuf-devel protobuf-compiler gcc gcc-c++ make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python cmake
+
+# CentOS / Red Hat Enterprise Linux
+yum install autoconf automake curl gcc gcc-c++ git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel elfutils-libelf-devel protobuf protobuf-devel protobuf-compiler make nc pkgconfig python zlib-devel cmake
+
+# openSUSE
+zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel libopenssl-devel libelf-devel libmnl-devel protobuf-devel gcc gcc-c++ make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python cmake
+```
+
+Once Netdata is compiled, the following packages are required to run it (they are already installed by the commands above):
+
+| package | description|
+|:-----:|-----------|
+| `libuuid` | part of `util-linux` for GUIDs management|
+| `zlib` | gzip compression for the internal Netdata web server|
+| `libuv` | Multi-platform support library with a focus on asynchronous I/O, version 1 or greater|
+
+*Netdata will fail to start without the above.*
+
+Netdata plugins and various aspects of Netdata are enabled or benefit when the following packages are installed (they are optional):
+
+| package |description|
+|:-----:|-----------|
+| `bash`|for shell plugins and **alert notifications**|
+| `curl`|for shell plugins and **alert notifications**|
+| `iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete|
+| `python`|for most of the external plugins|
+| `python-yaml`|used for monitoring **beanstalkd**|
+| `python-beanstalkc`|used for monitoring **beanstalkd**|
+| `python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred|
+| `nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices|
+| `lm-sensors`|for monitoring **hardware sensors**|
+| `libelf`|for monitoring kernel-level metrics using eBPF|
+| `libmnl`|for collecting netfilter metrics|
+| `netcat`|for shell plugins to collect metrics from remote systems|
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+Netdata DB engine can be enabled when these are installed (they are optional):
+
+| package | description|
+|:-----:|-----------|
+| `liblz4` | Extremely fast compression algorithm, version r129 or greater|
+| `openssl`| Cryptography and SSL/TLS toolkit|
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+Netdata Cloud support may require the following packages to be installed:
+
+| package | description |
+|:---------:|--------------------------------------------------------------------------------------------------------------------------------------|
+| `cmake` | Needed at build time if you aren't using your distribution's version of libwebsockets or are building on a platform other than Linux |
+| `openssl` | Needed to secure communications with the Netdata Cloud |
+| `protobuf`| Used for the new Cloud<->Agent binary protocol |
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+### CentOS / RHEL 6.x
+
+On CentOS / RHEL 6.x, many of the dependencies for Netdata are only
+available with versions older than what we need, so special setup is
+required if manually installing packages.
+
+CentOS 6.x:
+
+- Enable the EPEL repo
+- Enable the additional repo from [okay.network](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+
+And install the minimum required dependencies.
+
+### CentOS / RHEL 8.x
+
+For CentOS / RHEL 8.x a lot of development packages have moved out into their
+own separate repositories. Some other dependencies are either missing completely
+or have to be sourced from third parties.
+
+CentOS 8.x:
+
+- Enable the PowerTools repo
+- Enable the EPEL repo
+- Enable the Extra repo from [OKAY](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+
+And install the minimum required dependencies:
+
+```sh
+# Enable config-manager
+yum install -y 'dnf-command(config-manager)'
+
+# Enable PowerTools
+yum config-manager --set-enabled powertools
+
+# Enable EPEL
+yum install -y epel-release
+
+# Install repo for libuv-devel (NEW)
+yum install -y http://repo.okay.com.mx/centos/8/x86_64/release/okay-release-1-3.el8.noarch.rpm
+
+# Install Devel Packages
+yum install autoconf automake curl gcc git cmake libuuid-devel openssl-devel libuv-devel lz4-devel make nc pkgconfig python3 zlib-devel
+
+```
+
+## Install Netdata
+
+Do this to install and run Netdata:
+
+```sh
+# download it - the directory 'netdata' will be created
+git clone https://github.com/netdata/netdata.git --depth=100 --recursive
+cd netdata
+
+# run script with root privileges to build, install, start Netdata
+./netdata-installer.sh
+```
+
+- If you don't want to run it straight away, add the `--dont-start-it` option.
+
+- You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds.
+
+- If you don't want to install it on the default directories, you can run the installer like this: `./netdata-installer.sh --install-prefix /opt`. This one will install Netdata in `/opt/netdata`.
+
+- If your server does not have access to the internet and you have manually put the installation directory on your server, you will need to pass the option `--disable-go` to the installer. The option will prevent the installer from attempting to download and install `go.d.plugin`.
+
+## Optional parameters to alter your installation
+
+`netdata-installer.sh` accepts a few parameters to customize your installation:
+
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--nightly-channel`: Automatically update on every new nightly build.
+- `--disable-telemetry`: Opt-out of [anonymous statistics](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) we use to make
+ Netdata better.
+- `--no-updates`: Prevent automatic updates of any kind.
+- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this
+ cannot be used to change installation types.
+- `--local-files`: Used for [offline installations](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md). Pass four file paths: the Netdata
+  tarball, the checksum file, the go.d plugin tarball, and the go.d plugin config tarball, to force the installer to run the
+ process using those files. This option conflicts with the `--stable-channel` option. If you set this _and_
+ `--stable-channel`, Netdata will use the local files.
+
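+For example, several of these parameters can be combined in a single run (a sketch):
+
+```sh
+# run with root privileges to build, install, and start Netdata
+./netdata-installer.sh --install-prefix /opt --stable-channel --disable-telemetry --dont-wait
+```
+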
+### Connect node to Netdata Cloud during installation
+
+Unlike the [`kickstart.sh`](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md), the `netdata-installer.sh` script does
+not allow you to automatically [connect](https://github.com/netdata/netdata/blob/master/claim/README.md) your node to Netdata Cloud immediately after installation.
+
+See the [connect to cloud](https://github.com/netdata/netdata/blob/master/claim/README.md) doc for details on connecting a node with a manual installation of Netdata.
+
+### 'nonrepresentable section on output' errors
+
+Our current build process unfortunately has some issues when using certain configurations of the `clang` C compiler on Linux.
+
+If the installation fails with errors like `/bin/ld: externaldeps/libwebsockets/libwebsockets.a(context.c.o): relocation R_X86_64_32 against '.rodata.str1.1' can not be used when making a PIE object; recompile with -fPIC`, and you are trying to build with `clang` on Linux, you will need to build Netdata using GCC to get a fully functional install.
+
+In most cases, you can do this by running `CC=gcc ./netdata-installer.sh`.
+
+
+### Perform a cleanup in your netdata repo
+
+The Netdata repo consists of the main git tree and its submodules. Whether you are working on a fork or on the main repo, you need to make sure that there
+are no "leftover" artifacts from previous builds and that your submodules are updated to the **corresponding checkouts**.
+
+> #### Important: Make sure that you have committed any work in progress before you proceed with the cleanup instructions below.
+
+
+```sh
+git clean -dfx && git submodule foreach 'git clean -dfx' && git submodule update --recursive --init
+```
+
+
+> Note: Previous builds may have created artifacts belonging to another user (e.g. root), so you may need to run
+> each of the `git clean` commands with `sudo`.
diff --git a/packaging/installer/methods/methods.md b/packaging/installer/methods/methods.md
new file mode 100644
index 00000000..f9ca2253
--- /dev/null
+++ b/packaging/installer/methods/methods.md
@@ -0,0 +1,26 @@
+<!--
+title: "Installation methods"
+description: "Netdata can be installed as a DEB/RPM package, a static binary, a docker container or from source"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/methods.md
+sidebar_label: "Installation methods"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 30
+-->
+
+# Installation methods
+
+Netdata can be installed:
+
+- [As a DEB/RPM package](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)
+- [As a static binary](https://github.com/netdata/netdata/blob/master/packaging/makeself/README.md)
+- [From a git checkout](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)
+- [As a docker container](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)
+
+The [one line installer kickstart.sh](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md)
+picks the most appropriate method out of the first three for any system
+and is the recommended installation method, if you don't use containers.
+
+`kickstart.sh` can also be used for
+[offline installation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md),
+suitable for air-gapped systems.
diff --git a/packaging/installer/methods/offline.md b/packaging/installer/methods/offline.md
new file mode 100644
index 00000000..f2b6cc41
--- /dev/null
+++ b/packaging/installer/methods/offline.md
@@ -0,0 +1,59 @@
+<!--
+title: "Install Netdata on offline systems"
+description: "Install the Netdata Agent on offline/air gapped systems to benefit from real-time, per-second monitoring without connecting to the internet."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/offline.md"
+sidebar_label: "Offline systems"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 50
+-->
+
+# Install Netdata on offline systems
+
+Our kickstart install script provides support for installing the Netdata Agent on air-gapped systems which do not have a
+usable internet connection by prefetching all of the required files so that they can be copied to the target system.
+Currently, we only support using static installs with this method. There are tentative plans to support building
+locally on offline systems as well, but there is currently no estimate of when this functionality may be implemented.
+
+Users who wish to use native packages on offline systems may be able to do so using whatever tooling their
+distribution already provides for offline package management (such as `apt-offline` on Debian or Ubuntu systems),
+but this is not officially supported.
+
+## Preparing the offline installation source
+
+The first step to installing Netdata on an offline system is to prepare the offline installation source. This can
+be done as a regular user from any internet-connected system that has the following tools available:
+
+- cURL or wget
+- sha256sum or shasum
+- A standard POSIX compliant shell
+
+To prepare the offline installation source, simply run:
+
+```bash
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
+```
+
+or
+
+```bash
+curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
+```
+
+> The exact name used for the directory does not matter; you can specify any other name you want in place of `./netdata-offline`.
+
+This will create a directory called `netdata-offline` in the current directory and place all the files required for an offline install in it.
+
+If you want to use a specific release channel (nightly or stable), it _must_ be specified in this step using the
+appropriate option for the kickstart script, as shown in the example below.
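+
+For example, to prepare an offline installation source from the stable channel (a sketch):
+
+```bash
+sh /tmp/netdata-kickstart.sh --stable-channel --prepare-offline-install-source ./netdata-offline
+```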
+
+## Installing on the target system
+
+Once you have prepared the offline install source, you need to copy the offline install source directory to the
+target system. This can be done in any manner you like, as long as filenames are not changed.
+
+After copying the files, simply run the `install.sh` script located in the
+offline install source directory. It accepts all the [same options as the kickstart
+script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) for further
+customization of the installation, though it will default to not enabling automatic updates (as they are not
+supported on offline installs).
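+
+For example, on the target system (a sketch; run it from wherever you copied the directory, adding any kickstart options you need):
+
+```bash
+cd ./netdata-offline
+# depending on your system, the install may need root privileges
+sudo sh ./install.sh
+```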
diff --git a/packaging/installer/methods/packages.md b/packaging/installer/methods/packages.md
new file mode 100644
index 00000000..d49e2139
--- /dev/null
+++ b/packaging/installer/methods/packages.md
@@ -0,0 +1,151 @@
+<!--
+title: "Install Netdata using native DEB/RPM packages."
+description: "Instructions for how to install Netdata using native DEB or RPM packages."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/packages.md"
+sidebar_label: "Native DEB/RPM packages"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 20
+-->
+
+# Install Netdata using native DEB/RPM packages.
+
+For most common Linux distributions that use either DEB or RPM packages, Netdata provides pre-built native packages
+for current releases in line with
+our [official platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+These packages will be used by default when attempting to install on a supported platform using our
+[kickstart.sh installer script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+When using the kickstart script, you can force usage of native DEB or RPM packages by passing the option
+`--native-only` when invoking the script. This will cause it to only attempt to use native packages for the install,
+and fail if it cannot do so.
+
+
+
+> ### Note
+>
+> In July 2022, we switched hosting of our native packages from Package Cloud to self-hosted repositories.
+> We still maintain the Package Cloud repositories, but they are not guaranteed to work and may be removed
+> without prior warning.
+>
+> When selecting a repository configuration package, note that the version 2 packages provide configuration for
+> our self-hosted repositories, while the version 1 packages provide configuration for Package Cloud.
+
+
+## Manual setup of RPM packages.
+
+Netdata’s official RPM repositories are hosted at https://repo.netdata.cloud/repos. We provide four groups of
+repositories at that top level:
+
+- `stable`: Contains packages for stable releases of the Netdata Agent.
+- `edge`: Contains packages for nightly builds of the Netdata Agent.
+- `repoconfig`: Provides packages that set up configuration files for using the other repositories.
+- `devel`: Is used for one-off development builds of the Netdata Agent, and can simply be ignored by users.
+
+Within each top level group of repositories, there are directories for each supported group of distributions:
+
+- `amazonlinux`: Is for Amazon Linux and binary compatible distros.
+- `el`: Is for Red Hat Enterprise Linux and binary compatible distros that are not covered by other repos, such
+ as CentOS, Alma Linux, and Rocky Linux.
+- `fedora`: Is for Fedora and binary compatible distros.
+- `ol`: Is for Oracle Linux and binary compatible distros.
+- `opensuse`: Is for openSUSE and binary compatible distros.
+
+Under each of those directories is a directory for each supported release of that distribution, and under that a
+directory for each supported CPU architecture which contains the actual repository.
+
+For example, for stable release packages for RHEL 9 on 64-bit x86, the full URL for the repository would be
+https://repo.netdata.cloud/repos/stable/el/9/x86_64/
+
+Our RPM packages and repository metadata are signed using a GPG key with a user name of ‘Netdatabot’. The
+current key fingerprint is `6588FDD7B14721FE7C3115E6F9177B5265F56346`. The associated public key can be fetched from
+`https://repo.netdata.cloud/netdatabot.gpg.key`.
+
+If you are explicitly configuring a system to use our repositories, the recommended setup is to download the
+appropriate repository configuration package from https://repo.netdata.cloud/repos/repoconfig and install it
+directly on the target system using the system package manager. This will ensure any packages needed to use the
+repository are also installed, and will help enable a seamless transition if we ever need to change our infrastructure.
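+
+For example, on an EL 9 (RHEL, Alma, or Rocky) x86_64 system this might look like the following sketch; the repoconfig package file name changes over time, so browse the directory listing for the current one:
+
+```bash
+# install the repository configuration package (file name is a placeholder)
+dnf install https://repo.netdata.cloud/repos/repoconfig/el/9/x86_64/<repoconfig-package>.rpm
+# then install Netdata itself from the newly configured repository
+dnf install netdata
+```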
+
+> ### Note
+>
+> On RHEL and other systems that use the `el` repositories, some of the dependencies for Netdata can only be found
+> in the EPEL repository, which is not enabled or installed by default on most of these systems. This additional
+> repository _should_ be pulled in automatically by our repository config packages, but if it is not you may need
+> to manually install `epel-release` to be able to successfully install the Netdata packages.
+
+## Manual setup of DEB packages.
+
+Netdata’s official DEB repositories are hosted at https://repo.netdata.cloud/repos. We provide four groups of
+repositories at that top level:
+
+- `stable`: Contains packages for stable releases of the Netdata Agent.
+- `edge`: Contains packages for nightly builds of the Netdata Agent.
+- `repoconfig`: Provides packages that set up configuration files for using the other repositories.
+- `devel`: Is used for one-off development builds of the Netdata Agent, and can simply be ignored by users.
+
+Within each top level group of repositories, there are directories for each supported group of distributions:
+
+- `debian`: Is for Debian Linux and binary compatible distros.
+- `ubuntu`: Is for Ubuntu Linux and binary compatible distros.
+
+Under each of these directories is a directory for each supported release, corresponding to the release codename.
+
+These repositories are set up as what Debian calls ‘flat repositories’, and are available via both HTTP and HTTPS.
+
+As a result of this structure, the required APT sources entry for stable packages for Debian 11 (Bullseye) is:
+
+```
+deb http://repo.netdata.cloud/repos/stable/debian/ bullseye/
+```
+
+Note the `/` at the end of the codename; this is required for the repository to be processed correctly.
+
+Our DEB packages and repository metadata are signed using a GPG key with a user name of ‘Netdatabot’. The
+current key fingerprint is `6588FDD7B14721FE7C3115E6F9177B5265F56346`. The associated public key can be fetched from
+`https://repo.netdata.cloud/netdatabot.gpg.key`.
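+
+If you prefer to configure the repository fully by hand instead of using a repoconfig package, the process looks roughly like the following sketch (the keyring location and sources file name are illustrative choices, not requirements):
+
+```bash
+# run as root (or prefix each command with sudo)
+curl -fsSL https://repo.netdata.cloud/netdatabot.gpg.key \
+  | gpg --dearmor -o /usr/share/keyrings/netdata-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/netdata-archive-keyring.gpg] http://repo.netdata.cloud/repos/stable/debian/ bullseye/" \
+  > /etc/apt/sources.list.d/netdata.list
+apt-get update
+apt-get install netdata
+```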
+
+If you are explicitly configuring a system to use our repositories, the recommended setup is to download the
+appropriate repository configuration package from https://repo.netdata.cloud/repos/repoconfig and install it
+directly on the target system using the system package manager. This will ensure any packages needed to use the
+repository are also installed, and will help enable a seamless transition if we ever need to change our infrastructure.
+
+## Local mirrors of the official Netdata repositories
+
+Local mirrors of our official repositories can be created in one of two ways:
+
+1. Using the standard tooling for mirroring the type of repository you want a local mirror of, such as Aptly for
+ APT repositories, or reposync for RPM repositories. For this approach, please consult the documentation for
+ the specific tool you are using for info on how to mirror the repositories.
+2. Using a regular website mirroring tool, such as GNU wget’s `--mirror` option. For this approach, simply point
+ your mirroring tool at `https://repo.netdata.cloud/repos/`, and everything should just work.
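+
+A sketch of the second approach using GNU wget (note the `robots.txt` caveat in the tips below):
+
+```bash
+wget --mirror -e robots=off --no-parent https://repo.netdata.cloud/repos/
+```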
+
+We do not provide official support for mirroring our repositories,
+but we do have some tips for anyone looking to do so:
+
+- Our `robots.txt` file explicitly disallows indexing, so if you’re using a regular website mirroring tool,
+  you will need to tell it to ignore `robots.txt` (for example, if using GNU wget, add `-e robots=off` to the
+ options you pass) to ensure that it actually retrieves everything.
+- Excluding special cases of caching proxies (such as apt-cacher-ng), our repository configuration packages _DO NOT_
+ work with custom local mirrors. Thus, you will need to manually configure your systems to use your local mirror.
+- Packages are published as they are built, with 64-bit x86 packages being built first, followed by 32-bit x86,
+ and then non-x86 packages in alphabetical order of the CPU architecture. Because of the number of different
+ packages being built, this means that packages for a given nightly build or stable release are typically published
+ over the course of a few hours, usually starting about 15-20 minutes after the build or release is started.
+- Repository metadata is updated every hour on the hour, and the process may take anywhere from a few seconds to
+ more than 20 minutes. Because of this, it makes little sense to sync your mirror more frequently than once an hour,
+ and it’s generally preferred to start syncing at least 30 minutes into the hour.
+- A full mirror of all of our repositories currently requires up to 100 GB of storage space, though the exact
+ amount of space needed fluctuates over time. Because of this, users seeking to mirror our repositories are
+ encouraged to mirror only those repositories they actually need instead of mirroring everything.
+- If syncing daily (or less frequently), some time between 05:00 and 08:00 UTC each day is usually the safest
+ time to do so, as publishing nightly packages will almost always be done by this point, and publishing of stable
+ releases typically happens after that time window.
+- If you intend to use our existing GPG signatures on the repository metadata and packages, you probably also want
+ a local copy of our public GPG key, which can be fetched from `https://repo.netdata.cloud/netdatabot.gpg.key`.
+
+## Public mirrors of the official Netdata repositories
+
+There are no official public mirrors of our repositories.
+
+If you wish to provide a public mirror of our official repositories, you are free to do so, but we kindly ask that
+you make it clear to your users that your mirror is not an official mirror of our repositories.
diff --git a/packaging/installer/methods/pfsense.md b/packaging/installer/methods/pfsense.md
new file mode 100644
index 00000000..965fba8d
--- /dev/null
+++ b/packaging/installer/methods/pfsense.md
@@ -0,0 +1,87 @@
+<!--
+title: "Install Netdata on pfSense"
+description: "Install Netdata on pfSense to monitor the health and performance of firewalls with thousands of real-time, per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/pfsense.md
+sidebar_label: "pfSense"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on pfSense CE
+
+> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the
+> details of the installation process, such as version numbers for downloadable packages, before proceeding.
+>
+> You can help improve this document by [submitting a
+> PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/pfsense.md) with your recommended
+> improvements or changes. Thank you!
+
+## Install prerequisites/dependencies
+
+To install Netdata on pfSense, first enable the [FreeBSD package repo](https://docs.netgate.com/pfsense/en/latest/recipes/freebsd-pkg-repo.html).
+Then run the following command (within a shell or under the **Diagnostics/Command**
+prompt in the pfSense web interface):
+
+```bash
+pkg install -y pkgconf bash e2fsprogs-libuuid libuv nano
+```
+
+Then run the following commands to install the remaining dependencies from the FreeBSD repository.
+
+```sh
+pkg install json-c-0.15_1
+pkg install py39-certifi-2023.5.7
+pkg install py39-asn1crypto
+pkg install py39-pycparser
+pkg install py39-cffi
+pkg install py39-six
+pkg install py39-cryptography
+pkg install py39-idna
+pkg install py39-openssl
+pkg install py39-pysocks
+pkg install py39-urllib3
+pkg install py39-yaml
+```
+
+> ⚠️ If any of the above commands return a `Not Found` error, you need to manually search for the latest package in the
+> [FreeBSD repository](https://www.freebsd.org/ports/) or by running `pkg search`. Search for the package's name, such as `py39-cffi`, find the
+> latest version number, and update the command accordingly.
+
+> ⚠️ On pfSense 2.4.5, Python version 3.7 may be installed by the system, in which case you should not install
+> Python from the FreeBSD repository as instructed above.
+
+> ⚠️ If you are using the `apcupsd` collector, you need to make sure that apcupsd is up before starting Netdata.
+> Otherwise an infinitely running `cat` process triggered by the default-activated apcupsd charts plugin will eat up CPU
+> and RAM (`/tmp/.netdata-charts.d-*/run-*`). This also applies to `OPNsense`.
+
+## Install Netdata
+
+You can now install Netdata from the FreeBSD repository.
+
+```bash
+pkg install netdata
+```
+
+> ⚠️ If the above command returns a `Not Found` error, you need to manually search for the latest version of Netdata in
+> the [FreeBSD repository](https://www.freebsd.org/ports/). Search for `netdata`, find the latest version number, and
+> update the command accordingly.
+
+You must edit `/usr/local/etc/netdata/netdata.conf` and change `bind to = 127.0.0.1` to `bind to = 0.0.0.0`.
+
+To start Netdata manually, run `service netdata onestart`.
+
+Visit the Netdata dashboard to confirm it's working: `http://<pfsenseIP>:19999`
+
+To start Netdata automatically every boot, add `service netdata onestart` as a Shellcmd entry within the pfSense web
+interface under **Services/Shellcmd**. You'll need to install the Shellcmd package beforehand under **System/Package
+Manager/Available Packages**. The Shellcmd Type should be set to `Shellcmd`.
+![](https://i.imgur.com/wcKiPe1.png)
+Alternatively, see <https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages> for information on achieving the
+same via the command line and scripts.
+
+If you experience an issue with `/usr/bin/install` being absent in pfSense 2.3 or earlier, update pfSense or use a
+workaround from <https://redmine.pfsense.org/issues/6643>.
+
+**Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata`.
+
+
diff --git a/packaging/installer/methods/source.md b/packaging/installer/methods/source.md
new file mode 100644
index 00000000..8f34218a
--- /dev/null
+++ b/packaging/installer/methods/source.md
@@ -0,0 +1,244 @@
+<!--
+title: "Manually build Netdata from source"
+description: "Package maintainers and power users may be interested in manually building Netdata from source without using any of our installation scripts."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/source.md"
+sidebar_label: "Manually build Netdata from source"
+learn_status: "Published"
+learn_rel_path: "Installation/Package maintainers"
+sidebar_position: 100
+-->
+
+# Manually build Netdata from source
+
+These instructions are for advanced users and distribution package
+maintainers. Unless this describes you, you almost certainly want
+to follow [our guide for manually installing Netdata from a git
+checkout](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md) instead.
+
+## Required dependencies
+
+At a bare minimum, Netdata requires the following libraries and tools
+to build and run successfully:
+
+- libuuid
+- libuv version 1.0 or newer
+- zlib
+- GNU autoconf
+- GNU automake
+- GCC or Xcode (Clang is known to have issues in certain configurations, see [Using Clang](#using-clang))
+- A version of `make` compatible with GNU automake
+- Git (we use git in the build system to generate version info; a full install is not needed, just a working `git show` command)
+
+Additionally, the following build time features require additional dependencies:
+
+- TLS support for the web GUI:
+ - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer.
+- dbengine metric storage:
+ - liblz4 r129 or newer
+  - OpenSSL 1.0 or newer (LibreSSL _may_ work, but is largely untested).
+- Netdata Cloud support:
+ - A working internet connection
+ - A recent version of CMake
+ - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer.
+ - JSON-C (may be provided by the user as shown below, or by the system)
+ - protobuf (Google Protocol Buffers) and protoc compiler
+
+## Preparing the source tree
+
+Certain features in Netdata require custom versions of specific libraries,
+which the build system will link statically into Netdata. These
+libraries and their header files must be copied into specific locations
+in the source tree to be used.
+
+Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
+Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo).
+
+### Netdata cloud
+
+#### JSON-C
+
+Netdata requires the use of JSON-C for JSON parsing when using Netdata
+Cloud. Netdata is able to use a system-provided copy of JSON-C, but
+some systems may not provide it. If your system does not provide JSON-C,
+you can do the following to prepare a copy for the build system:
+
+1. Verify the tag that Netdata expects to be used by checking the contents
+ of `packaging/jsonc.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/json-c/json-c and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Prepare the JSON-C sources by running `cmake -DBUILD_SHARED_LIBS=OFF .`
+ in the JSON-C source directory.
+4. Build JSON-C by running `make` in the JSON-C source directory.
+5. In the Netdata source directory, create a directory called
+ `externaldeps/jsonc`.
+6. Copy `libjson-c.a` from the JSON-C source directory to
+ `externaldeps/jsonc/libjson-c.a` in the Netdata source tree.
+7. Copy all of the header files (`*.h`) from the JSON-C source directory
+ to `externaldeps/jsonc/json-c` in the Netdata source tree.
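+
+Condensed into shell commands, the steps above look roughly like the following. The paths are placeholders, and
+the tag must be whatever `packaging/jsonc.version` specifies for your checkout:
+
+```sh
+# Build a static copy of JSON-C and stage it where the Netdata build system looks for it.
+JSONC_TAG="$(cat /path/to/netdata/packaging/jsonc.version)"
+git clone https://github.com/json-c/json-c.git
+cd json-c
+git checkout "${JSONC_TAG}"
+cmake -DBUILD_SHARED_LIBS=OFF .
+make
+mkdir -p /path/to/netdata/externaldeps/jsonc/json-c
+cp libjson-c.a /path/to/netdata/externaldeps/jsonc/
+cp *.h /path/to/netdata/externaldeps/jsonc/json-c/
+```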
+
+## Building Netdata
+
+Once the source tree has been prepared, Netdata is ready to be configured
+and built. Netdata currently uses GNU autotools as its primary build
+system. To build Netdata this way:
+
+1. Run `autoreconf -ivf` in the Netdata source tree.
+2. Run `./configure` in the Netdata source tree.
+3. Run `make` in the Netdata source tree.
+
+### Configure options
+
+Netdata provides a number of build time configure options. This section
+lists some of the ones you are most likely to need:
+
+- `--prefix`: Specify the prefix under which Netdata will be installed.
+- `--with-webdir`: Specify a path relative to the prefix in which to
+ install the web UI files.
+- `--disable-cloud`: Disables all Netdata Cloud functionality for
+ this build.
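+
+For example (not a recommendation, just an illustration of how these options are passed), a build installed under
+`/opt/netdata` with Netdata Cloud disabled would be configured and built like this:
+
+```sh
+autoreconf -ivf
+./configure --prefix=/opt/netdata --disable-cloud
+make
+```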
+
+### Using Clang
+
+Netdata is primarily developed using GCC, but in most cases we also
+build just fine using Clang. Under some build configurations of Clang
+itself, you may see build failures with the linker reporting errors
+about `nonrepresentable section on output`. We currently do not have a
+conclusive fix for this issue (the obvious fix leads to other issues which
+we haven't been able to fix yet), and unfortunately the only workaround
+is to use a different build of Clang or to use GCC.
+
+### Linking errors relating to OpenSSL
+
+Netdata's build system currently does not reliably support building
+on systems which have multiple ABI incompatible versions of OpenSSL
+installed. In such situations, you may encounter linking errors due to
+Netdata trying to build against headers for one version but link to a
+different version.
+
+## Additional components
+
+A full featured install of Netdata requires some additional components
+which must be built and installed separately from the main Netdata
+agent. All of these should be handled _after_ installing Netdata itself.
+
+### React dashboard
+
+The above build steps include a deprecated web UI for Netdata that lacks
+support for Netdata Cloud. To get a fully featured dashboard, you must
+install our new React dashboard.
+
+#### Installing the pre-built React dashboard
+
+We provide pre-built archives of the React dashboard for each release
+(these are also used during our normal install process). To use one
+of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/dashboard.version` in your Netdata sources.
+2. Go to https://github.com/netdata/dashboard/releases and download the
+ `dashboard.tar.gz` file for the required release.
+3. Unpack the downloaded archive to a temporary directory.
+4. Copy the contents of the `build` directory from the extracted
+ archive to `/usr/share/netdata/web` or the equivalent location for
+ your build of Netdata. This _will_ overwrite some files in the target
+ location.
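+
+A rough sketch of those steps, assuming the tag read from `packaging/dashboard.version` matches a release tag and
+that the download URL follows GitHub's usual release-asset layout (verify both against the releases page), with
+`/usr/share/netdata/web` as the target directory:
+
+```sh
+DASHBOARD_TAG="$(cat packaging/dashboard.version)"
+curl -sSL -o dashboard.tar.gz \
+  "https://github.com/netdata/dashboard/releases/download/${DASHBOARD_TAG}/dashboard.tar.gz"
+mkdir -p /tmp/netdata-dashboard
+tar -xzf dashboard.tar.gz -C /tmp/netdata-dashboard
+cp -a /tmp/netdata-dashboard/build/* /usr/share/netdata/web/
+```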
+
+#### Building the React dashboard locally
+
+Alternatively, you may wish to build the React dashboard locally. Doing
+so requires a recent version of Node.JS with a working install of
+NPM. Once you have the required tools, do the following:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/dashboard.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/netdata/dashboard and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Run `npm install` in the dashboard source tree.
+4. Run `npm run build` in the dashboard source tree.
+5. Copy the contents of the `build` directory just like step 4 of
+ installing the pre-built React dashboard.
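+
+Run from the unpacked dashboard source tree, steps 3 to 5 look roughly like this (again assuming
+`/usr/share/netdata/web` as the target directory):
+
+```sh
+npm install
+npm run build
+cp -a build/* /usr/share/netdata/web/
+```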
+
+### Go collectors
+
+A number of the collectors for Netdata are written in Go instead of C,
+and are developed in a separate repository from the main Netdata code.
+An installation without these collectors is still usable, but will be
+unable to collect metrics for a number of network services the system
+may be providing. You can either install a pre-built copy of these
+collectors, or build them locally.
+
+#### Installing the pre-built Go collectors
+
+We provide pre-built binaries of the Go collectors for all the platforms
+we officially support. To use one of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/go.d.version` in your Netdata sources.
+2. Go to https://github.com/netdata/go.d.plugin/releases, select the
+ required release, and download the `go.d.plugin-*.tar.gz` file
+  for your system type and CPU architecture and the `config.tar.gz`
+ configuration file archive.
+3. Extract the `go.d.plugin-*.tar.gz` archive into a temporary
+ location, and then copy the single file in the archive to
+ `/usr/libexec/netdata/plugins.d` or the equivalent location for your
+ build of Netdata and rename it to `go.d.plugin`.
+4. Extract the `config.tar.gz` archive to a temporary location and then
+ copy the contents of the archive to `/etc/netdata` or the equivalent
+ location for your build of Netdata.
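+
+A sketch of those steps with placeholder archive names (substitute the files you actually downloaded, and adjust
+the destination directories if your build uses different ones):
+
+```sh
+mkdir -p /tmp/go.d /tmp/go.d-config
+tar -xzf go.d.plugin-*.tar.gz -C /tmp/go.d
+# The plugin archive contains a single binary; install it under the expected name.
+cp /tmp/go.d/* /usr/libexec/netdata/plugins.d/go.d.plugin
+tar -xzf config.tar.gz -C /tmp/go.d-config
+cp -a /tmp/go.d-config/. /etc/netdata/
+```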
+
+#### Building the Go collectors locally
+
+Alternatively, you may wish to build the Go collectors locally
+yourself. Doing so requires a working installation of Golang 1.13 or
+newer. Once you have the required tools, do the following:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/go.d.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/netdata/go.d.plugin and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Run `make` in the go.d.plugin source tree.
+4. Copy `bin/godplugin` to `/usr/libexec/netdata/plugins.d` or the
+ equivalent location for your build of Netdata and rename it to
+ `go.d.plugin`.
+5. Copy the contents of the `config` directory to `/etc/netdata` or the
+ equivalent location for your build of Netdata.
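+
+Run from the go.d.plugin source tree, the build and install steps look roughly like this (destinations are the
+usual defaults and may differ for your build):
+
+```sh
+make
+cp bin/godplugin /usr/libexec/netdata/plugins.d/go.d.plugin
+cp -a config/. /etc/netdata/
+```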
+
+### eBPF collector
+
+On Linux systems, Netdata has support for using the kernel's eBPF
+interface to monitor performance-related VFS, network, and process events,
+allowing for insights into process lifetimes and file access
+patterns. Using this functionality requires additional code managed in
+a separate repository from the core Netdata agent. You can either install
+a pre-built copy of the required code, or build it locally.
+
+#### Installing the pre-built eBPF code
+
+We provide pre-built copies of the eBPF code for 64-bit x86 systems
+using glibc or musl. To use one of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/ebpf.version` in your Netdata sources.
+2. Go to https://github.com/netdata/kernel-collector/releases, select the
+ required release, and download the `netdata-kernel-collector-*.tar.xz`
+   file for the libc variant your system uses (either musl or glibc).
+3. Extract the contents of the archive to a temporary location, and then
+ copy all of the `.o` and `.so.*` files and the contents of the `library/`
+ directory to `/usr/libexec/netdata/plugins.d` or the equivalent location
+ for your build of Netdata.
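+
+A sketch of those steps with a placeholder archive name (pick the glibc or musl archive matching your system; check
+the layout inside the archive first, as the files may be nested under a top-level directory):
+
+```sh
+mkdir -p /tmp/netdata-ebpf
+tar -xJf netdata-kernel-collector-*.tar.xz -C /tmp/netdata-ebpf
+cp /tmp/netdata-ebpf/*.o /tmp/netdata-ebpf/*.so.* /usr/libexec/netdata/plugins.d/
+cp -a /tmp/netdata-ebpf/library/. /usr/libexec/netdata/plugins.d/
+```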
+
+#### Building the eBPF code locally
+
+Alternatively, you may wish to build the eBPF code locally yourself. For
+instructions, please consult [the README file for our kernel-collector
+repository](https://github.com/netdata/kernel-collector/#readme),
+which outlines both the required dependencies, as well as multiple
+options for building the code.
+
+
diff --git a/packaging/installer/methods/synology.md b/packaging/installer/methods/synology.md
new file mode 100644
index 00000000..3910859b
--- /dev/null
+++ b/packaging/installer/methods/synology.md
@@ -0,0 +1,64 @@
+<!--
+title: "Install Netdata on Synology"
+description: "The Netdata Agent can be installed on AMD64-compatible NAS systems using the 64-bit pre-compiled static binary."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/synology.md
+sidebar_label: "Synology"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install Netdata on Synology
+
+> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the
+> details of the installation process, before proceeding.
+>
+> You can help improve this document by
+> [submitting a PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/synology.md)
+> with your recommended improvements or changes. Thank you!
+
+
+The good news is that our
+[one-line installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md)
+works fine if your NAS uses the amd64 architecture. It
+will install the content into `/opt/netdata`, making future removal safe and simple.
+
+## Run as netdata user
+
+When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other
+installations run it as the `netdata` user, you might wish to do the same. This requires some extra work:
+
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
+2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign
+ the user to the `netdata` group. Netdata will chuid to this user when running.
+3. Change ownership of the following directories, as defined in
+ [Netdata Security](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#security-design):
+
+```sh
+chown -R root:netdata /opt/netdata/usr/share/netdata
+chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
+chown -R netdata:root /opt/netdata/var/log/netdata
+```
+
+4. Restart Netdata
+
+```sh
+/etc/rc.netdata restart
+```
+
+## Create startup script
+
+Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is
+installed. You'll have to do this manually:
+
+1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it
+ executable with `chmod 0755 /etc/rc.netdata`.
+2. Add or edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
+
+```conf
+# Netdata startup
+[ -x /etc/rc.netdata ] && /etc/rc.netdata start
+```
+
+3. Make sure `/etc/rc.netdata` is executable: `chmod 0755 /etc/rc.netdata`.
+
+
diff --git a/packaging/installer/methods/systems.md b/packaging/installer/methods/systems.md
new file mode 100644
index 00000000..e53c4f4a
--- /dev/null
+++ b/packaging/installer/methods/systems.md
@@ -0,0 +1,18 @@
+<!--
+title: "Install on specific environments"
+description: "Netdata can be installed as a DEB/RPM package, a static binary, a docker container or from source"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/systems.md
+sidebar_label: "Install on specific environments"
+learn_status: "Published"
+learn_rel_path: "Installation/Install on specific environments"
+-->
+
+# Install on specific environments
+
+This category contains specific instructions for some popular environments.
+If you have a standard environment that is not yet listed here, just use the
+[one-line installer `kickstart.sh`](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+
+If your environment is somewhat old or unusual, check our
+[platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+
diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh
new file mode 100755
index 00000000..4326ebe2
--- /dev/null
+++ b/packaging/installer/netdata-uninstaller.sh
@@ -0,0 +1,778 @@
+#!/bin/sh
+#
+# This is the netdata uninstaller script
+#
+# Variables needed by script and taken from '.environment' file:
+# - NETDATA_PREFIX
+# - NETDATA_ADDED_TO_GROUPS
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+# Next unused error code: R0005
+
+usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything
+
+where:
+  -e, --env    path to environment file (defaults to '/etc/netdata/.environment')
+ -f, --force force uninstallation and do not ask any questions
+ -h show this help text
+ -y, --yes flag needs to be set to proceed with uninstallation"
+
+FILE_REMOVAL_STATUS=0
+ENVIRONMENT_FILE="/etc/netdata/.environment"
+# shellcheck disable=SC2034
+INTERACTIVITY="-i"
+YES=0
+while :; do
+ case "$1" in
+ -h | --help)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ -f | --force)
+ INTERACTIVITY="-f"
+ shift
+ ;;
+ -y | --yes)
+ YES=1
+ FLAG=-y
+ shift
+ ;;
+ -e | --env)
+ ENVIRONMENT_FILE="$2"
+ shift 2
+ ;;
+ -*)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ *) break ;;
+ esac
+done
+
+if [ -n "${script_source}" ]; then
+ script_name="$(basename "${script_source}")"
+else
+ script_name="netdata-uninstaller.sh"
+fi
+
+info() {
+ echo >&2 "$(date) : INFO: ${script_name}: " "${1}"
+}
+
+error() {
+ echo >&2 "$(date) : ERROR: ${script_name}: " "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - ${1}"
+ fi
+}
+
+fatal() {
+ echo >&2 "$(date) : FATAL: ${script_name}: FAILED TO UNINSTALL NETDATA: " "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - ${1}"
+ fi
+ exit_reason "${1}" "${2}"
+ exit 1
+}
+
+exit_reason() {
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ EXIT_REASON="${1}"
+ EXIT_CODE="${2}"
+ if [ -n "${NETDATA_PROPAGATE_WARNINGS}" ]; then
+ export EXIT_REASON
+ export EXIT_CODE
+ export NETDATA_WARNINGS
+ fi
+ fi
+}
+
+if [ "$YES" != "1" ]; then
+ echo >&2 "This script will REMOVE netdata from your system."
+ echo >&2 "Run it again with --yes to do it."
+ exit_reason "User did not accept uninstalling." R0001
+ exit 1
+fi
+
+if [ "$(id -u)" -ne 0 ]; then
+ error "This script SHOULD be run as root or otherwise it won't delete all installed components."
+ key="n"
+  printf >&2 "Do you want to continue as non-root user [y/n] ? "
+  read -r key
+ if [ "$key" != "y" ] && [ "$key" != "Y" ]; then
+ exit_reason "User cancelled uninstall." R0002
+ exit 1
+ fi
+fi
+
+user_input() {
+ if [ "${INTERACTIVITY}" = "-i" ]; then
+ TEXT="$1 [y/n]"
+
+ while true; do
+ echo "$TEXT"
+ read -r yn
+
+ case "$yn" in
+ [Yy]*) return 0;;
+ [Nn]*) return 1;;
+ *) echo "Please answer yes or no.";;
+ esac
+ done
+ fi
+}
+
+_cannot_use_tmpdir() {
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ]; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}"; then
+ if chmod +x "${testfile}"; then
+ if [ "$("${testfile}")" = "SUCCESS" ]; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}"; then
+ if _cannot_use_tmpdir /tmp; then
+ if _cannot_use_tmpdir "${PWD}"; then
+ fatal "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again." R0003
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+
+ mktemp -d -t netdata-uninstaller-XXXXXXXXXX
+}
+
+tmpdir="$(create_tmp_directory)"
+
+detect_existing_install() {
+ if pkg_installed netdata; then
+ ndprefix="/"
+ else
+ if [ -n "${INSTALL_PREFIX}" ]; then
+ searchpath="${INSTALL_PREFIX}/bin:${INSTALL_PREFIX}/sbin:${INSTALL_PREFIX}/usr/bin:${INSTALL_PREFIX}/usr/sbin:${PATH}"
+ searchpath="${INSTALL_PREFIX}/netdata/bin:${INSTALL_PREFIX}/netdata/sbin:${INSTALL_PREFIX}/netdata/usr/bin:${INSTALL_PREFIX}/netdata/usr/sbin:${searchpath}"
+ else
+ searchpath="${PATH}"
+ fi
+
+ ndpath="$(PATH="${searchpath}" command -v netdata 2>/dev/null)"
+
+ if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ]; then
+ ndpath="/opt/netdata/bin/netdata"
+ fi
+
+ if [ -n "${ndpath}" ]; then
+ ndprefix="$(dirname "$(dirname "${ndpath}")")"
+ fi
+
+ if echo "${ndprefix}" | grep -Eq '/usr$'; then
+ ndprefix="$(dirname "${ndprefix}")"
+ fi
+ fi
+
+ if [ -n "${ndprefix}" ]; then
+ typefile="${ndprefix}/etc/netdata/.install-type"
+ envfile="${ndprefix}/etc/netdata/.environment"
+ if [ -r "${typefile}" ]; then
+ ${ROOTCMD} sh -c "cat \"${typefile}\" > \"${tmpdir}/install-type\""
+ # shellcheck disable=SC1090,SC1091
+ . "${tmpdir}/install-type"
+ else
+ INSTALL_TYPE="unknown"
+ fi
+
+ if [ "${INSTALL_TYPE}" = "unknown" ] || [ "${INSTALL_TYPE}" = "custom" ]; then
+ if [ -r "${envfile}" ]; then
+ ${ROOTCMD} sh -c "cat \"${envfile}\" > \"${tmpdir}/environment\""
+ # shellcheck disable=SC1091
+ . "${tmpdir}/environment"
+ if [ -n "${NETDATA_IS_STATIC_INSTALL}" ]; then
+ if [ "${NETDATA_IS_STATIC_INSTALL}" = "yes" ]; then
+ INSTALL_TYPE="legacy-static"
+ else
+ INSTALL_TYPE="legacy-build"
+ fi
+ fi
+ fi
+ fi
+ fi
+}
+
+pkg_installed() {
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
+ dpkg-query --show --showformat '${Status}' "${1}" 2>&1 | cut -f 1 -d ' ' | grep -q '^install$'
+ return $?
+ ;;
+ centos|fedora|opensuse|ol)
+ rpm -q "${1}" > /dev/null 2>&1
+ return $?
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+}
+
+detect_existing_install
+
+if [ -x "$(command -v apt-get)" ] && [ "${INSTALL_TYPE}" = "binpkg-deb" ]; then
+ if dpkg -s netdata > /dev/null; then
+ echo "Found netdata native installation"
+ if user_input "Do you want to remove netdata? "; then
+ # shellcheck disable=SC2086
+ apt-get remove netdata ${FLAG}
+ fi
+ if dpkg -s netdata-repo-edge > /dev/null; then
+ if user_input "Do you want to remove netdata-repo-edge? "; then
+ # shellcheck disable=SC2086
+ apt-get remove netdata-repo-edge ${FLAG}
+ fi
+ fi
+ if dpkg -s netdata-repo > /dev/null; then
+ if user_input "Do you want to remove netdata-repo? "; then
+ # shellcheck disable=SC2086
+ apt-get remove netdata-repo ${FLAG}
+ fi
+ fi
+ exit 0
+ fi
+elif [ -x "$(command -v dnf)" ] && [ "${INSTALL_TYPE}" = "binpkg-rpm" ]; then
+ if rpm -q netdata > /dev/null; then
+ echo "Found netdata native installation."
+ if user_input "Do you want to remove netdata? "; then
+ # shellcheck disable=SC2086
+ dnf remove netdata ${FLAG}
+ fi
+ if rpm -q netdata-repo-edge > /dev/null; then
+ if user_input "Do you want to remove netdata-repo-edge? "; then
+ # shellcheck disable=SC2086
+ dnf remove netdata-repo-edge ${FLAG}
+ fi
+ fi
+ if rpm -q netdata-repo > /dev/null; then
+ if user_input "Do you want to remove netdata-repo? "; then
+ # shellcheck disable=SC2086
+ dnf remove netdata-repo ${FLAG}
+ fi
+ fi
+ exit 0
+ fi
+elif [ -x "$(command -v yum)" ] && [ "${INSTALL_TYPE}" = "binpkg-rpm" ]; then
+ if rpm -q netdata > /dev/null; then
+ echo "Found netdata native installation."
+ if user_input "Do you want to remove netdata? "; then
+ # shellcheck disable=SC2086
+ yum remove netdata ${FLAG}
+ fi
+ if rpm -q netdata-repo-edge > /dev/null; then
+ if user_input "Do you want to remove netdata-repo-edge? "; then
+ # shellcheck disable=SC2086
+ yum remove netdata-repo-edge ${FLAG}
+ fi
+ fi
+ if rpm -q netdata-repo > /dev/null; then
+ if user_input "Do you want to remove netdata-repo? "; then
+ # shellcheck disable=SC2086
+ yum remove netdata-repo ${FLAG}
+ fi
+ fi
+ exit 0
+ fi
+elif [ -x "$(command -v zypper)" ] && [ "${INSTALL_TYPE}" = "binpkg-rpm" ]; then
+ if [ "${FLAG}" = "-y" ]; then
+ FLAG=-n
+ fi
+ if zypper search -i netdata > /dev/null; then
+ echo "Found netdata native installation."
+ if user_input "Do you want to remove netdata? "; then
+ # shellcheck disable=SC2086
+ zypper ${FLAG} remove netdata
+ fi
+ if zypper search -i netdata-repo-edge > /dev/null; then
+ if user_input "Do you want to remove netdata-repo-edge? "; then
+ # shellcheck disable=SC2086
+ zypper ${FLAG} remove netdata-repo-edge
+ fi
+ fi
+ if zypper search -i netdata-repo > /dev/null; then
+ if user_input "Do you want to remove netdata-repo? "; then
+ # shellcheck disable=SC2086
+ zypper ${FLAG} remove netdata-repo
+ fi
+ fi
+ exit 0
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+# portable service command
+
+service_cmd="$(command -v service 2> /dev/null)"
+rcservice_cmd="$(command -v rc-service 2> /dev/null)"
+systemctl_cmd="$(command -v systemctl 2> /dev/null)"
+service() {
+
+ cmd="${1}"
+ action="${2}"
+
+ if [ -n "${systemctl_cmd}" ]; then
+ run "${systemctl_cmd}" "${action}" "${cmd}"
+ return $?
+ elif [ -n "${service_cmd}" ]; then
+ run "${service_cmd}" "${cmd}" "${action}"
+ return $?
+ elif [ -n "${rcservice_cmd}" ]; then
+ run "${rcservice_cmd}" "${cmd}" "${action}"
+ return $?
+ fi
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_YELLOW=""
+ TPUT_WHITE=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput 1> /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+ESCAPED_PRINT_METHOD=
+if printf "%s " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%s " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
+run() {
+ user="${USER--}"
+ dir="${PWD}"
+
+ if [ "$(id -u)" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "%s" "${info}"
+ escaped_print "${@}"
+ printf "%s" " ... "
+ } >> "${run_logfile}"
+
+ printf "%s" "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" >&2
+ escaped_print >&2 "${@}"
+ printf "%s\n" "${TPUT_RESET}" >&2
+
+ "${@}"
+
+ ret=$?
+ if [ ${ret} -ne 0 ]; then
+ printf >&2 "%s FAILED %s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}"
+ printf >> "${run_logfile}" "FAILED with exit code %s\n" "${ret}"
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - Command \"${*}\" failed with exit code ${ret}."
+ else
+ printf >&2 "%s OK %s\n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}"
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+portable_del_group() {
+ groupname="${1}"
+
+  # Check if the group exists
+ info "Removing ${groupname} user group ..."
+
+ # Linux
+ if command -v groupdel 1> /dev/null 2>&1; then
+ if get_group "${groupname}" > /dev/null 2>&1; then
+ run groupdel "${groupname}" && return 0
+ else
+ info "Group ${groupname} already removed in a previous step."
+ return 0
+ fi
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ if dseditgroup -o read netdata 1> /dev/null 2>&1; then
+ run dseditgroup -o delete "${groupname}" && return 0
+ else
+ info "Could not find group ${groupname}, nothing to do"
+ return 0
+ fi
+ fi
+
+ error "Group ${groupname} was not automatically removed, you might have to remove it manually"
+ return 1
+}
+
+issystemd() {
+ pids=''
+ p=''
+ myns=''
+ ns=''
+ systemctl=''
+
+  # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exist, it is not systemd
+ if [ ! -d /lib/systemd/system ] && [ ! -d /usr/lib/systemd/system ]; then
+ return 1
+ fi
+
+ # if there is no systemctl command, it is not systemd
+ systemctl=$(command -v systemctl 2> /dev/null)
+ if [ -z "${systemctl}" ] || [ ! -x "${systemctl}" ]; then
+ return 1
+ fi
+
+ # if pid 1 is systemd, it is systemd
+ [ "$(basename "$(readlink /proc/1/exe)" 2> /dev/null)" = "systemd" ] && return 0
+
+ # if systemd is not running, it is not systemd
+ pids=$(safe_pidof systemd 2> /dev/null)
+ [ -z "${pids}" ] && return 1
+
+ # check if the running systemd processes are not in our namespace
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+ for p in ${pids}; do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ # if pid of systemd is in our namespace, it is systemd
+ [ -n "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
+ done
+
+ # else, it is not systemd
+ return 1
+}
+
+portable_del_user() {
+ username="${1}"
+ info "Deleting ${username} user account ..."
+
+ # Linux
+ if command -v userdel 1> /dev/null 2>&1; then
+ run userdel -f "${username}" && return 0
+ fi
+
+ # mac OS
+ if command -v sysadminctl 1> /dev/null 2>&1; then
+ run sysadminctl -deleteUser "${username}" && return 0
+ fi
+
+ error "User ${username} could not be deleted from system, you might have to remove it manually"
+ return 1
+}
+
+portable_del_user_from_group() {
+ groupname="${1}"
+ username="${2}"
+
+ # username is not in group
+ info "Deleting ${username} user from ${groupname} group ..."
+
+ # Linux
+ if command -v gpasswd 1> /dev/null 2>&1; then
+    run gpasswd -d "${username}" "${groupname}" && return 0
+ fi
+
+ # FreeBSD
+ if command -v pw 1> /dev/null 2>&1; then
+ run pw groupmod "${groupname}" -d "${username}" && return 0
+ fi
+
+ # BusyBox
+ if command -v delgroup 1> /dev/null 2>&1; then
+ run delgroup "${username}" "${groupname}" && return 0
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ run dseditgroup -o delete -u "${username}" "${groupname}" && return 0
+ fi
+
+ error "Failed to delete user ${username} from group ${groupname} !"
+ return 1
+}
+
+quit_msg() {
+ echo
+ if [ "$FILE_REMOVAL_STATUS" -eq 0 ]; then
+ fatal "Failed to completely remove Netdata from this system." R0004
+ else
+ info "Netdata files were successfully removed from your system"
+ fi
+}
+
+rm_file() {
+ FILE="$1"
+ if [ -f "${FILE}" ]; then
+ if user_input "Do you want to delete this file '$FILE' ? "; then
+ run rm -v "${FILE}"
+ fi
+ fi
+}
+
+rm_dir() {
+ DIR="$1"
+ if [ -n "$DIR" ] && [ -d "$DIR" ]; then
+ if user_input "Do you want to delete this directory '$DIR' ? "; then
+ run rm -v -f -R "${DIR}"
+ fi
+ fi
+}
+
+safe_pidof() {
+ pidof_cmd="$(command -v pidof 2> /dev/null)"
+ if [ -n "${pidof_cmd}" ]; then
+ ${pidof_cmd} "${@}"
+ return $?
+ else
+ ps -acxo pid,comm |
+ sed "s/^ *//g" |
+ grep netdata |
+ cut -d ' ' -f 1
+ return $?
+ fi
+}
+
+pidisnetdata() {
+ if [ -d /proc/self ]; then
+ if [ -z "$1" ] || [ ! -f "/proc/$1/stat" ]; then
+ return 1
+ fi
+ [ "$(cut -d '(' -f 2 "/proc/$1/stat" | cut -d ')' -f 1)" = "netdata" ] && return 0
+ return 1
+ fi
+ return 0
+}
+
+stop_netdata_on_pid() {
+ pid="${1}"
+ ret=0
+ count=0
+
+ pidisnetdata "${pid}" || return 0
+
+ info "Stopping netdata on pid ${pid} ..."
+ while [ -n "$pid" ] && [ ${ret} -eq 0 ]; do
+ if [ ${count} -gt 24 ]; then
+ error "Cannot stop the running netdata on pid ${pid}."
+ return 1
+ fi
+
+ count=$((count + 1))
+
+ pidisnetdata "${pid}" || ret=1
+ if [ ${ret} -eq 1 ]; then
+ break
+ fi
+
+ if [ ${count} -lt 12 ]; then
+ run kill "${pid}" 2> /dev/null
+ ret=$?
+ else
+ run kill -9 "${pid}" 2> /dev/null
+ ret=$?
+ fi
+
+ test ${ret} -eq 0 && printf >&2 "." && sleep 5
+
+ done
+
+ echo >&2
+ if [ ${ret} -eq 0 ]; then
+ error "SORRY! CANNOT STOP netdata ON PID ${pid} !"
+ return 1
+ fi
+
+ info "netdata on pid ${pid} stopped."
+ return 0
+}
+
+netdata_pids() {
+ p=''
+ ns=''
+
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+
+ for p in \
+ $(cat /var/run/netdata.pid 2> /dev/null) \
+ $(cat /var/run/netdata/netdata.pid 2> /dev/null) \
+ $(safe_pidof netdata 2> /dev/null); do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ if [ -z "${myns}" ] || [ -z "${ns}" ] || [ "${myns}" = "${ns}" ]; then
+ pidisnetdata "${p}" && echo "${p}"
+ fi
+ done
+}
+
+stop_all_netdata() {
+ p=''
+ stop_success=0
+
+ if [ "$(id -u)" -eq 0 ]; then
+ uname="$(uname 2> /dev/null)"
+
+ # Any of these may fail, but we need to not bail if they do.
+ if issystemd; then
+ if systemctl stop netdata; then
+ stop_success=1
+ sleep 5
+ fi
+ elif [ "${uname}" = "Darwin" ]; then
+ if launchctl stop netdata; then
+ stop_success=1
+ sleep 5
+ fi
+ elif [ "${uname}" = "FreeBSD" ]; then
+ if /etc/rc.d/netdata stop; then
+ stop_success=1
+ sleep 5
+ fi
+ else
+ if service netdata stop; then
+ stop_success=1
+ sleep 5
+ fi
+ fi
+ fi
+
+ if [ "$stop_success" = "0" ]; then
+ if [ -n "$(netdata_pids)" ] && [ -n "$(command -v netdatacli)" ]; then
+ netdatacli shutdown-agent
+ sleep 20
+ fi
+
+ for p in $(netdata_pids); do
+ # shellcheck disable=SC2086
+ stop_netdata_on_pid ${p}
+ done
+ fi
+}
+
+trap quit_msg EXIT
+
+# shellcheck source=/dev/null
+# shellcheck disable=SC1090
+. "${ENVIRONMENT_FILE}" || exit 1
+
+#### STOP NETDATA
+info "Stopping a possibly running netdata..."
+stop_all_netdata
+
+#### REMOVE NETDATA FILES
+rm_file /etc/logrotate.d/netdata
+rm_file /etc/systemd/system/netdata.service
+rm_file /lib/systemd/system/netdata.service
+rm_file /usr/lib/systemd/system/netdata.service
+rm_file /etc/systemd/system/netdata-updater.service
+rm_file /lib/systemd/system/netdata-updater.service
+rm_file /usr/lib/systemd/system/netdata-updater.service
+rm_file /etc/systemd/system/netdata-updater.timer
+rm_file /lib/systemd/system/netdata-updater.timer
+rm_file /usr/lib/systemd/system/netdata-updater.timer
+rm_file /usr/lib/systemd/system-preset/50-netdata.preset
+rm_file /lib/systemd/system-preset/50-netdata.preset
+rm_file /etc/init.d/netdata
+rm_file /etc/periodic/daily/netdata-updater
+rm_file /etc/cron.daily/netdata-updater
+rm_file /etc/cron.d/netdata-updater
+rm_file /etc/cron.d/netdata-updater-daily
+
+
+if [ -n "${NETDATA_PREFIX}" ] && [ -d "${NETDATA_PREFIX}" ] && [ "netdata" = "$(basename "$NETDATA_PREFIX")" ] ; then
+ rm_dir "${NETDATA_PREFIX}"
+else
+ rm_file "${NETDATA_PREFIX}/usr/sbin/netdata"
+ rm_file "${NETDATA_PREFIX}/usr/sbin/netdatacli"
+ rm_file "${NETDATA_PREFIX}/usr/sbin/netdata-claim.sh"
+ rm_file "${NETDATA_PREFIX}/usr/sbin/log2journal"
+ rm_file "${NETDATA_PREFIX}/usr/sbin/systemd-cat-native"
+ rm_file "/tmp/netdata-ipc"
+ rm_file "/tmp/netdata-service-cmds"
+ rm_dir "${NETDATA_PREFIX}/usr/share/netdata"
+ rm_dir "${NETDATA_PREFIX}/usr/libexec/netdata"
+ rm_dir "${NETDATA_PREFIX}/var/lib/netdata"
+ rm_dir "${NETDATA_PREFIX}/var/cache/netdata"
+ rm_dir "${NETDATA_PREFIX}/var/log/netdata"
+ rm_dir "${NETDATA_PREFIX}/etc/netdata"
+fi
+
+if [ -n "${tmpdir}" ]; then
+ run rm -rf "${tmpdir}" || true
+fi
+
+FILE_REMOVAL_STATUS=1
+
+#### REMOVE NETDATA USER FROM ADDED GROUPS
+if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then
+ if user_input "Do you want to delete 'netdata' from following groups: '$NETDATA_ADDED_TO_GROUPS' ? "; then
+ for group in $NETDATA_ADDED_TO_GROUPS; do
+ portable_del_user_from_group "${group}" "netdata"
+ done
+ fi
+fi
+
+#### REMOVE USER
+if user_input "Do you want to delete 'netdata' system user ? "; then
+ portable_del_user "netdata" || :
+fi
+
+### REMOVE GROUP
+if user_input "Do you want to delete 'netdata' system group ? "; then
+ portable_del_group "netdata" || :
+fi
diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh
new file mode 100755
index 00000000..80faea0a
--- /dev/null
+++ b/packaging/installer/netdata-updater.sh
@@ -0,0 +1,990 @@
+#!/bin/sh
+
+# Netdata updater utility
+#
+# Variables needed by script:
+# - PATH
+# - CFLAGS
+# - LDFLAGS
+# - MAKEOPTS
+# - IS_NETDATA_STATIC_BINARY
+# - NETDATA_CONFIGURE_OPTIONS
+# - REINSTALL_OPTIONS
+# - NETDATA_TARBALL_URL
+# - NETDATA_TARBALL_CHECKSUM_URL
+# - NETDATA_TARBALL_CHECKSUM
+# - NETDATA_PREFIX
+# - NETDATA_LIB_DIR
+#
+# Optional environment options:
+#
+# - TMPDIR (set to a usable temporary directory)
+# - NETDATA_NIGHTLIES_BASEURL (set the base url for downloading the dist tarball)
+#
+# Copyright: 2018-2023 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+# Author: Austin S. Hemmelgarn <austin@netdata.cloud>
+
+# Next unused error code: U001B
+
+set -e
+
+PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"
+
+NETDATA_STABLE_BASE_URL="${NETDATA_BASE_URL:-https://github.com/netdata/netdata/releases}"
+NETDATA_NIGHTLY_BASE_URL="${NETDATA_BASE_URL:-https://github.com/netdata/netdata-nightlies/releases}"
+
+# Following variables are intended to be overridden by the updater config file.
+NETDATA_UPDATER_JITTER=3600
+NETDATA_NO_SYSTEMD_JOURNAL=0
+
+script_dir="$(CDPATH='' cd -- "$(dirname -- "$0")" && pwd -P)"
+
+if [ -x "${script_dir}/netdata-updater" ]; then
+ script_source="${script_dir}/netdata-updater"
+else
+ script_source="${script_dir}/netdata-updater.sh"
+fi
+
+PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
+
+if [ ! -t 1 ]; then
+ INTERACTIVE=0
+else
+ INTERACTIVE=1
+fi
+
+if [ -n "${script_source}" ]; then
+ script_name="$(basename "${script_source}")"
+else
+ script_name="netdata-updater.sh"
+fi
+
+info() {
+ echo >&3 "$(date) : INFO: ${script_name}: " "${1}"
+}
+
+warning() {
+ echo >&3 "$(date) : WARNING: ${script_name}: " "${@}"
+}
+
+error() {
+ echo >&3 "$(date) : ERROR: ${script_name}: " "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - ${1}"
+ fi
+}
+
+fatal() {
+ echo >&3 "$(date) : FATAL: ${script_name}: FAILED TO UPDATE NETDATA: " "${1}"
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ NETDATA_WARNINGS="${NETDATA_WARNINGS}\n - ${1}"
+ fi
+ exit_reason "${1}" "${2}"
+ exit 1
+}
+
+exit_reason() {
+ if [ -n "${NETDATA_SAVE_WARNINGS}" ]; then
+ EXIT_REASON="${1}"
+ EXIT_CODE="${2}"
+ if [ -n "${NETDATA_PROPAGATE_WARNINGS}" ]; then
+ if [ -n "${NETDATA_SCRIPT_STATUS_PATH}" ]; then
+ {
+ echo "EXIT_REASON=\"${EXIT_REASON}\""
+ echo "EXIT_CODE=\"${EXIT_CODE}\""
+ echo "NETDATA_WARNINGS=\"${NETDATA_WARNINGS}\""
+ } >> "${NETDATA_SCRIPT_STATUS_PATH}"
+ else
+ export EXIT_REASON
+ export EXIT_CODE
+ export NETDATA_WARNINGS
+ fi
+ fi
+ fi
+}
+
+is_integer () {
+ case "${1#[+-]}" in
+ *[!0123456789]*) return 1 ;;
+ '') return 1 ;;
+ *) return 0 ;;
+ esac
+}
+
+issystemd() {
+  # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exist, it is not systemd
+ if [ ! -d /lib/systemd/system ] && [ ! -d /usr/lib/systemd/system ]; then
+ return 1
+ fi
+
+ # if there is no systemctl command, it is not systemd
+ systemctl=$(command -v systemctl 2> /dev/null)
+ if [ -z "${systemctl}" ] || [ ! -x "${systemctl}" ]; then
+ return 1
+ fi
+
+ # if pid 1 is systemd, it is systemd
+ [ "$(basename "$(readlink /proc/1/exe)" 2> /dev/null)" = "systemd" ] && return 0
+
+ # if systemd is not running, it is not systemd
+ pids=$(safe_pidof systemd 2> /dev/null)
+ [ -z "${pids}" ] && return 1
+
+ # check if the running systemd processes are not in our namespace
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+ for p in ${pids}; do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ # if pid of systemd is in our namespace, it is systemd
+ [ -n "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
+ done
+
+ # else, it is not systemd
+ return 1
+}
+
+_get_intervaldir() {
+ if [ -d /etc/cron.daily ]; then
+ echo /etc/cron.daily
+ elif [ -d /etc/periodic/daily ]; then
+ echo /etc/periodic/daily
+ else
+ return 1
+ fi
+
+ return 0
+}
+
+_get_scheduler_type() {
+ if _get_intervaldir > /dev/null ; then
+ echo 'interval'
+ elif issystemd ; then
+ echo 'systemd'
+ elif [ -d /etc/cron.d ] ; then
+ echo 'crontab'
+ else
+ echo 'none'
+ fi
+}
+
+install_build_dependencies() {
+ bash="$(command -v bash 2> /dev/null)"
+
+ if [ -z "${bash}" ] || [ ! -x "${bash}" ]; then
+ error "Unable to find a usable version of \`bash\` (required for local build)."
+ return 1
+ fi
+
+ info "Fetching dependency handling script..."
+ download "${PACKAGES_SCRIPT}" "./install-required-packages.sh" || true
+
+ if [ ! -s "./install-required-packages.sh" ]; then
+ error "Downloaded dependency installation script is empty."
+ else
+ info "Running dependency handling script..."
+
+ opts="--dont-wait --non-interactive"
+
+ # shellcheck disable=SC2086
+ if ! "${bash}" "./install-required-packages.sh" ${opts} netdata >&3 2>&3; then
+ error "Installing build dependencies failed. The update should still work, but you might be missing some features."
+ fi
+ fi
+}
+
+enable_netdata_updater() {
+ updater_type="$(echo "${1}" | tr '[:upper:]' '[:lower:]')"
+ case "${updater_type}" in
+ systemd|interval|crontab)
+ updater_type="${1}"
+ ;;
+ "")
+ updater_type="$(_get_scheduler_type)"
+ ;;
+ *)
+ fatal "Unrecognized updater type ${updater_type} requested. Supported types are 'systemd', 'interval', and 'crontab'." U0001
+ ;;
+ esac
+
+ case "${updater_type}" in
+ "systemd")
+ if issystemd; then
+ systemctl enable netdata-updater.timer
+
+ info "Auto-updating has been ENABLED using a systemd timer unit.\n"
+ info "If the update process fails, the failure will be logged to the systemd journal just like a regular service failure."
+ info "Successful updates should produce empty logs."
+ else
+ error "Systemd-based auto-update scheduling requested, but this does not appear to be a systemd system. Auto-updates have NOT been enabled."
+ return 1
+ fi
+ ;;
+ "interval")
+ if _get_intervaldir > /dev/null; then
+ ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" "$(_get_intervaldir)/netdata-updater"
+
+ info "Auto-updating has been ENABLED through cron, updater script linked to $(_get_intervaldir)/netdata-updater\n"
+ info "If the update process fails and you have email notifications set up correctly for cron on this system, you should receive an email notification of the failure."
+ info "Successful updates will not send an email."
+ else
+ error "Interval-based auto-update scheduling requested, but I could not find an interval scheduling directory. Auto-updates have NOT been enabled."
+ return 1
+ fi
+ ;;
+ "crontab")
+ if [ -d "/etc/cron.d" ]; then
+ [ -f "/etc/cron.d/netdata-updater" ] && rm -f "/etc/cron.d/netdata-updater"
+ install -p -m 0644 -o 0 -g 0 "${NETDATA_PREFIX}/usr/lib/system/cron/netdata-updater-daily" "/etc/cron.d/netdata-updater-daily"
+
+ info "Auto-updating has been ENABLED through cron, using a crontab at /etc/cron.d/netdata-updater\n"
+ info "If the update process fails and you have email notifications set up correctly for cron on this system, you should receive an email notification of the failure."
+ info "Successful updates will not send an email."
+ else
+ error "Crontab-based auto-update scheduling requested, but there is no '/etc/cron.d'. Auto-updates have NOT been enabled."
+ return 1
+ fi
+ ;;
+ *)
+ error "Unable to determine what type of auto-update scheduling to use. Auto-updates have NOT been enabled."
+ return 1
+ esac
+
+ return 0
+}
+
+disable_netdata_updater() {
+ if issystemd && ( systemctl list-units --full -all | grep -Fq "netdata-updater.timer" ) ; then
+ systemctl disable netdata-updater.timer
+ fi
+
+ if [ -d /etc/cron.daily ]; then
+ rm -f /etc/cron.daily/netdata-updater.sh
+ rm -f /etc/cron.daily/netdata-updater
+ fi
+
+ if [ -d /etc/periodic/daily ]; then
+ rm -f /etc/periodic/daily/netdata-updater.sh
+ rm -f /etc/periodic/daily/netdata-updater
+ fi
+
+ if [ -d /etc/cron.d ]; then
+ rm -f /etc/cron.d/netdata-updater
+ rm -f /etc/cron.d/netdata-updater-daily
+ fi
+
+ info "Auto-updates have been DISABLED."
+
+ return 0
+}
+
+str_in_list() {
+ printf "%s\n" "${2}" | tr ' ' "\n" | grep -qE "^${1}\$"
+ return $?
+}
+
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option that is common between the two commands.
+  # We will have to reconsider if we start using non-common options.
+ if command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ elif command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use" U0002
+ fi
+}
+
+cleanup() {
+ if [ -n "${logfile}" ]; then
+ cat >&2 "${logfile}"
+ rm "${logfile}"
+ fi
+
+ if [ -n "$ndtmpdir" ] && [ -d "$ndtmpdir" ]; then
+ rm -rf "$ndtmpdir"
+ fi
+}
+
+_cannot_use_tmpdir() {
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ] ; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}" ; then
+ if chmod +x "${testfile}" ; then
+ if [ "$("${testfile}" 2>/dev/null)" = "SUCCESS" ] ; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -n "${NETDATA_TMPDIR_PATH}" ]; then
+ echo "${NETDATA_TMPDIR_PATH}"
+ else
+ if [ -z "${NETDATA_TMPDIR}" ] || _cannot_use_tmpdir "${NETDATA_TMPDIR}" ; then
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}" ; then
+ if _cannot_use_tmpdir /tmp ; then
+ if _cannot_use_tmpdir "${PWD}" ; then
+ fatal "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again." U0003
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+ else
+ TMPDIR="${NETDATA_TMPDIR}"
+ fi
+
+ mktemp -d -t netdata-updater-XXXXXXXXXX
+ fi
+}
+
+check_for_curl() {
+ if [ -z "${curl}" ]; then
+ curl="$(PATH="${PATH}:/opt/netdata/bin" command -v curl 2>/dev/null && true)"
+ fi
+}
+
+_safe_download() {
+ url="${1}"
+ dest="${2}"
+
+ check_for_curl
+
+ if [ -n "${curl}" ]; then
+ "${curl}" -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
+ return $?
+ elif command -v wget > /dev/null 2>&1; then
+ wget -T 15 -O - "${url}" > "${dest}"
+ return $?
+ else
+ return 255
+ fi
+}
+
+download() {
+ url="${1}"
+ dest="${2}"
+
+ _safe_download "${url}" "${dest}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ return 0
+ elif [ ${ret} -eq 255 ]; then
+ fatal "I need curl or wget to proceed, but neither is available on this system." U0004
+ else
+ fatal "Cannot download ${url}" U0005
+ fi
+}
+
+get_netdata_latest_tag() {
+ url="${1}/latest"
+ dest="${2}"
+
+ check_for_curl
+
+ if [ -n "${curl}" ]; then
+ tag=$("${curl}" "${url}" -s -L -I -o /dev/null -w '%{url_effective}' | grep -m 1 -o '[^/]*$')
+ elif command -v wget >/dev/null 2>&1; then
+ tag=$(wget -S -O /dev/null "${url}" 2>&1 | grep -m 1 Location | grep -o '[^/]*$')
+ else
+ fatal "I need curl or wget to proceed, but neither of them are available on this system." U0006
+ fi
+
+ echo "${tag}" >"${dest}"
+}
+
+newer_commit_date() {
+ info "Checking if a newer version of the updater script is available."
+
+ commit_check_url="https://api.github.com/repos/netdata/netdata/commits?path=packaging%2Finstaller%2Fnetdata-updater.sh&page=1&per_page=1"
+ python_version_check="from __future__ import print_function;import sys,json;data = json.load(sys.stdin);print(data[0]['commit']['committer']['date'] if isinstance(data, list) else '')"
+
+ if command -v jq > /dev/null 2>&1; then
+ commit_date="$(_safe_download "${commit_check_url}" /dev/stdout | jq '.[0].commit.committer.date' 2>/dev/null | tr -d '"')"
+ elif command -v python > /dev/null 2>&1;then
+ commit_date="$(_safe_download "${commit_check_url}" /dev/stdout | python -c "${python_version_check}")"
+ elif command -v python3 > /dev/null 2>&1;then
+ commit_date="$(_safe_download "${commit_check_url}" /dev/stdout | python3 -c "${python_version_check}")"
+ fi
+
+ if [ -z "${commit_date}" ] ; then
+ return 0
+ elif [ "$(uname)" = "Linux" ]; then
+ commit_date="$(date -d "${commit_date}" +%s)"
+ else # assume BSD-style `date` if we are not on Linux
+ commit_date="$(/bin/date -j -f "%Y-%m-%dT%H:%M:%SZ" "${commit_date}" +%s 2>/dev/null)"
+
+ if [ -z "${commit_date}" ]; then
+ return 0
+ fi
+ fi
+
+ if [ -e "${script_source}" ]; then
+ script_date="$(date -r "${script_source}" +%s)"
+ else
+ script_date="$(date +%s)"
+ fi
+
+ [ "${commit_date}" -ge "${script_date}" ]
+}
+
+self_update() {
+ if [ -z "${NETDATA_NO_UPDATER_SELF_UPDATE}" ] && newer_commit_date; then
+ info "Downloading newest version of updater script."
+
+ ndtmpdir=$(create_tmp_directory)
+ cd "$ndtmpdir" || exit 1
+
+ if _safe_download "https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-updater.sh" ./netdata-updater.sh; then
+ chmod +x ./netdata-updater.sh || exit 1
+ export ENVIRONMENT_FILE="${ENVIRONMENT_FILE}"
+ force_update=""
+ [ "$NETDATA_FORCE_UPDATE" = "1" ] && force_update="--force-update"
+ exec ./netdata-updater.sh --not-running-from-cron --no-updater-self-update "$force_update" --tmpdir-path "$(pwd)"
+ else
+ error "Failed to download newest version of updater script, continuing with current version."
+ fi
+ fi
+}
+
+parse_version() {
+ r="${1}"
+ if [ "${r}" = "latest" ]; then
+ # If we get ‘latest’ as a version, return the largest possible
+ # version value.
+ printf "99999999999999"
+ return 0
+ elif echo "${r}" | grep -q '^v.*'; then
+ # shellcheck disable=SC2001
+ # XXX: Need a regex group substitution here.
+ r="$(echo "${r}" | sed -e 's/^v\(.*\)/\1/')"
+ fi
+
+ tmpfile="$(mktemp)"
+ echo "${r}" | tr '-' ' ' > "${tmpfile}"
+ read -r v b _ < "${tmpfile}"
+
+ if echo "${b}" | grep -vEq "^[0-9]+$"; then
+ b="0"
+ fi
+
+ echo "${v}" | tr '.' ' ' > "${tmpfile}"
+ read -r maj min patch _ < "${tmpfile}"
+
+ rm -f "${tmpfile}"
+
+ printf "%03d%03d%03d%05d" "${maj}" "${min}" "${patch}" "${b}"
+}
+
+get_latest_version() {
+ if [ "${RELEASE_CHANNEL}" = "stable" ]; then
+ get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}" /dev/stdout
+ else
+ get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}" /dev/stdout
+ fi
+}
+
+validate_environment_file() {
+ if [ -n "${NETDATA_PREFIX+SET_BUT_NULL}" ] && [ -n "${REINSTALL_OPTIONS+SET_BUT_NULL}" ]; then
+ return 0
+ else
+ fatal "Environment file located at ${ENVIRONMENT_FILE} is not valid, unable to update." U0007
+ fi
+}
+
+update_available() {
+ if [ "$NETDATA_FORCE_UPDATE" = "1" ]; then
+ info "Force update requested"
+ return 0
+ fi
+ basepath="$(dirname "$(dirname "$(dirname "${NETDATA_LIB_DIR}")")")"
+ searchpath="${basepath}/bin:${basepath}/sbin:${basepath}/usr/bin:${basepath}/usr/sbin:${PATH}"
+ searchpath="${basepath}/netdata/bin:${basepath}/netdata/sbin:${basepath}/netdata/usr/bin:${basepath}/netdata/usr/sbin:${searchpath}"
+ ndbinary="$(PATH="${searchpath}" command -v netdata 2>/dev/null)"
+
+ if [ -z "${ndbinary}" ]; then
+ current_version=0
+ else
+ current_version="$(parse_version "$(${ndbinary} -v | cut -f 2 -d ' ')")"
+ fi
+
+ latest_tag="$(get_latest_version)"
+ latest_version="$(parse_version "${latest_tag}")"
+ path_version="$(echo "${latest_tag}" | cut -f 1 -d "-")"
+
+ # If we can't get the current version for some reason assume `0`
+ current_version="${current_version:-0}"
+
+ # If we can't get the latest version for some reason assume `0`
+ latest_version="${latest_version:-0}"
+
+ info "Current Version: ${current_version}"
+ info "Latest Version: ${latest_version}"
+
+ if [ "${latest_version}" -gt 0 ] && [ "${current_version}" -gt 0 ] && [ "${current_version}" -ge "${latest_version}" ]; then
+ info "Newest version (current=${current_version} >= latest=${latest_version}) is already installed"
+ return 1
+ else
+ info "Update available"
+ return 0
+ fi
+}
+
+set_tarball_urls() {
+ filename="netdata-latest.tar.gz"
+
+ if [ "$2" = "yes" ]; then
+ if [ -e /opt/netdata/etc/netdata/.install-type ]; then
+ # shellcheck disable=SC1091
+ . /opt/netdata/etc/netdata/.install-type
+ filename="netdata-${PREBUILT_ARCH}-latest.gz.run"
+ else
+ filename="netdata-x86_64-latest.gz.run"
+ fi
+ fi
+
+ if [ "$1" = "stable" ]; then
+ latest="$(get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}" /dev/stdout)"
+ export NETDATA_TARBALL_URL="${NETDATA_STABLE_BASE_URL}/download/$latest/${filename}"
+ export NETDATA_TARBALL_CHECKSUM_URL="${NETDATA_STABLE_BASE_URL}/download/$latest/sha256sums.txt"
+ else
+ tag="$(get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}" /dev/stdout)"
+ export NETDATA_TARBALL_URL="${NETDATA_NIGHTLY_BASE_URL}/download/${tag}/${filename}"
+ export NETDATA_TARBALL_CHECKSUM_URL="${NETDATA_NIGHTLY_BASE_URL}/download/${tag}/sha256sums.txt"
+ fi
+}
+
+update_build() {
+ [ -z "${logfile}" ] && info "Running on a terminal - (this script also supports running headless from crontab)"
+
+ RUN_INSTALLER=0
+ ndtmpdir=$(create_tmp_directory)
+ cd "$ndtmpdir" || fatal "Failed to change current working directory to ${ndtmpdir}" U0016
+
+ install_build_dependencies
+
+ if update_available; then
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${ndtmpdir}/sha256sum.txt" >&3 2>&3
+ download "${NETDATA_TARBALL_URL}" "${ndtmpdir}/netdata-latest.tar.gz"
+ if [ -n "${NETDATA_TARBALL_CHECKSUM}" ] &&
+ grep "${NETDATA_TARBALL_CHECKSUM}" sha256sum.txt >&3 2>&3 &&
+ [ "$NETDATA_FORCE_UPDATE" != "1" ]; then
+ info "Newest version is already installed"
+ else
+ if ! grep netdata-latest.tar.gz sha256sum.txt | safe_sha256sum -c - >&3 2>&3; then
+ fatal "Tarball checksum validation failed. Stopping netdata upgrade and leaving tarball in ${ndtmpdir}\nUsually this is a result of an older copy of the tarball or checksum file being cached somewhere upstream and can be resolved by retrying in an hour." U0008
+ fi
+ NEW_CHECKSUM="$(safe_sha256sum netdata-latest.tar.gz 2> /dev/null | cut -d' ' -f1)"
+ tar -xf netdata-latest.tar.gz >&3 2>&3
+ rm netdata-latest.tar.gz >&3 2>&3
+ if [ -z "$path_version" ]; then
+ latest_tag="$(get_latest_version)"
+ path_version="$(echo "${latest_tag}" | cut -f 1 -d "-")"
+ fi
+ cd "$(find . -maxdepth 1 -type d -name "netdata-${path_version}*" | head -n 1)" || fatal "Failed to switch to build directory" U0017
+ RUN_INSTALLER=1
+ fi
+ fi
+
+ # We got the sources, run the update now
+ if [ ${RUN_INSTALLER} -eq 1 ]; then
+ # signal netdata to start saving its database
+ # this is handy if your database is big
+ possible_pids=$(pidof netdata)
+ do_not_start=
+ if [ -n "${possible_pids}" ]; then
+ # shellcheck disable=SC2086
+ kill -USR1 ${possible_pids}
+ else
+ # netdata is currently not running, so do not start it after updating
+ do_not_start="--dont-start-it"
+ fi
+
+ env="env TMPDIR=${TMPDIR}"
+
+ if [ -n "${NETDATA_SELECTED_DASHBOARD}" ]; then
+ env="${env} NETDATA_SELECTED_DASHBOARD=${NETDATA_SELECTED_DASHBOARD}"
+ fi
+
+ if [ ! -x ./netdata-installer.sh ]; then
+ if [ "$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
+ cd "$(find . -mindepth 1 -maxdepth 1 -type d)" || fatal "Failed to switch to build directory" U0018
+ fi
+ fi
+
+ if [ -e "${NETDATA_PREFIX}/etc/netdata/.install-type" ] ; then
+ install_type="$(cat "${NETDATA_PREFIX}"/etc/netdata/.install-type)"
+ else
+ install_type="INSTALL_TYPE='legacy-build'"
+ fi
+
+ if [ "${INSTALL_TYPE}" = "custom" ] && [ -f "${NETDATA_PREFIX}" ]; then
+ install_type="INSTALL_TYPE='legacy-build'"
+ fi
+
+ info "Re-installing netdata..."
+ export NETDATA_SAVE_WARNINGS=1
+ export NETDATA_PROPAGATE_WARNINGS=1
+ export NETDATA_WARNINGS="${NETDATA_WARNINGS}"
+ export NETDATA_SCRIPT_STATUS_PATH="${NETDATA_SCRIPT_STATUS_PATH}"
+ # shellcheck disable=SC2086
+ if ! ${env} ./netdata-installer.sh ${REINSTALL_OPTIONS} --dont-wait ${do_not_start} >&3 2>&3; then
+ if [ -r "${NETDATA_SCRIPT_STATUS_PATH}" ]; then
+ # shellcheck disable=SC1090
+ . "${NETDATA_SCRIPT_STATUS_PATH}"
+ rm -f "${NETDATA_SCRIPT_STATUS_PATH}"
+ fi
+ if [ -n "${EXIT_REASON}" ]; then
+ fatal "Failed to rebuild existing netdata install: ${EXIT_REASON}" "U${EXIT_CODE}"
+ else
+ fatal "Failed to rebuild existing netdata reinstall." UI0000
+ fi
+ fi
+
+ # We no longer store checksum info here, but leave this in place so that all environment files get cleaned up on the next update.
+ sed -i '/NETDATA_TARBALL/d' "${ENVIRONMENT_FILE}"
+
+ info "Updating tarball checksum info"
+ echo "${NEW_CHECKSUM}" > "${NETDATA_LIB_DIR}/netdata.tarball.checksum"
+
+ echo "${install_type}" > "${NETDATA_PREFIX}/etc/netdata/.install-type"
+ fi
+
+ rm -rf "${ndtmpdir}" >&3 2>&3
+ [ -n "${logfile}" ] && rm "${logfile}" && logfile=
+
+ return 0
+}
+
+update_static() {
+ ndtmpdir="$(create_tmp_directory)"
+ PREVDIR="$(pwd)"
+
+ info "Entering ${ndtmpdir}"
+ cd "${ndtmpdir}" || fatal "Failed to change current working directory to ${ndtmpdir}" U0019
+
+ if update_available; then
+ sysarch="${PREBUILT_ARCH}"
+ [ -z "$sysarch" ] && sysarch="$(uname -m)"
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${ndtmpdir}/sha256sum.txt"
+ download "${NETDATA_TARBALL_URL}" "${ndtmpdir}/netdata-${sysarch}-latest.gz.run"
+ if ! grep "netdata-${sysarch}-latest.gz.run" "${ndtmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Static binary checksum validation failed. Stopping netdata installation and leaving binary in ${ndtmpdir}\nUsually this is a result of an older copy of the file being cached somewhere and can be resolved by simply retrying in an hour." U000A
+ fi
+
+ if [ -e /opt/netdata/etc/netdata/.install-type ] ; then
+ install_type="$(cat /opt/netdata/etc/netdata/.install-type)"
+ else
+ install_type="INSTALL_TYPE='legacy-static'"
+ fi
+
+ # Do not pass any options other than --accept, for now
+ # shellcheck disable=SC2086
+ if sh "${ndtmpdir}/netdata-${sysarch}-latest.gz.run" --accept -- ${REINSTALL_OPTIONS} >&3 2>&3; then
+ rm -r "${ndtmpdir}"
+ else
+ info "NOTE: did not remove: ${ndtmpdir}"
+ fi
+
+ echo "${install_type}" > /opt/netdata/etc/netdata/.install-type
+ fi
+
+ if [ -e "${PREVDIR}" ]; then
+ info "Switching back to ${PREVDIR}"
+ cd "${PREVDIR}"
+ fi
+ [ -n "${logfile}" ] && rm "${logfile}" && logfile=
+ exit 0
+}
+
+update_binpkg() {
+ os_release_file=
+ if [ -s "/etc/os-release" ] && [ -r "/etc/os-release" ]; then
+ os_release_file="/etc/os-release"
+ elif [ -s "/usr/lib/os-release" ] && [ -r "/usr/lib/os-release" ]; then
+ os_release_file="/usr/lib/os-release"
+ else
+ fatal "Cannot find an os-release file ..." U000B
+ fi
+
+ # shellcheck disable=SC1090
+ . "${os_release_file}"
+
+ DISTRO="${ID}"
+
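+ # Map the detected distribution to one of the package families we publish native packages for; derivatives fall back to their closest upstream.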
+ supported_compat_names="debian ubuntu centos fedora opensuse ol amzn"
+
+ if str_in_list "${DISTRO}" "${supported_compat_names}"; then
+ DISTRO_COMPAT_NAME="${DISTRO}"
+ else
+ case "${DISTRO}" in
+ opensuse-leap|opensuse-tumbleweed)
+ DISTRO_COMPAT_NAME="opensuse"
+ ;;
+ cloudlinux|almalinux|centos-stream|rocky|rhel)
+ DISTRO_COMPAT_NAME="centos"
+ ;;
+ *)
+ DISTRO_COMPAT_NAME="unknown"
+ ;;
+ esac
+ fi
+
+ interactive_opts=""
+ env=""
+
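+ # Select the package manager command set (refresh, upgrade, install, mark-as-dependency) for the detected distribution family.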
+ case "${DISTRO_COMPAT_NAME}" in
+ debian|ubuntu)
+ if [ "${INTERACTIVE}" = "0" ]; then
+ upgrade_subcmd="-o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold --only-upgrade install"
+ interactive_opts="-y"
+ env="DEBIAN_FRONTEND=noninteractive"
+ else
+ upgrade_subcmd="--only-upgrade install"
+ fi
+ pm_cmd="apt-get"
+ repo_subcmd="update"
+ install_subcmd="install"
+ mark_auto_cmd="apt-mark auto"
+ pkg_install_opts="${interactive_opts}"
+ repo_update_opts="${interactive_opts}"
+ pkg_installed_check="dpkg-query -s"
+ INSTALL_TYPE="binpkg-deb"
+ ;;
+ centos|fedora|ol|amzn)
+ if [ "${INTERACTIVE}" = "0" ]; then
+ interactive_opts="-y"
+ fi
+ if command -v dnf > /dev/null; then
+ pm_cmd="dnf"
+ repo_subcmd="makecache"
+ mark_auto_cmd="dnf mark remove"
+ else
+ pm_cmd="yum"
+ mark_auto_cmd="yumdb set reason dep"
+ fi
+ upgrade_subcmd="upgrade"
+ install_subcmd="install"
+ pkg_install_opts="${interactive_opts}"
+ repo_update_opts="${interactive_opts}"
+ pkg_installed_check="rpm -q"
+ INSTALL_TYPE="binpkg-rpm"
+ ;;
+ opensuse)
+ if [ "${INTERACTIVE}" = "0" ]; then
+ upgrade_subcmd="--non-interactive update"
+ else
+ upgrade_subcmd="update"
+ fi
+ pm_cmd="zypper"
+ repo_subcmd="--gpg-auto-import-keys refresh"
+ install_subcmd="install"
+ mark_auto_cmd=""
+ pkg_install_opts=""
+ repo_update_opts=""
+ pkg_installed_check="rpm -q"
+ INSTALL_TYPE="binpkg-rpm"
+ ;;
+ *)
+ warning "We do not provide native packages for ${DISTRO}."
+ return 2
+ ;;
+ esac
+
+ if [ -n "${repo_subcmd}" ]; then
+ # shellcheck disable=SC2086
+ env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts} >&3 2>&3 || fatal "Failed to update repository metadata." U000C
+ fi
+
+ for repopkg in netdata-repo netdata-repo-edge; do
+ if ${pkg_installed_check} ${repopkg} > /dev/null 2>&1; then
+ # shellcheck disable=SC2086
+ env ${env} ${pm_cmd} ${upgrade_subcmd} ${pkg_install_opts} ${repopkg} >&3 2>&3 || fatal "Failed to update Netdata repository config." U000D
+ # shellcheck disable=SC2086
+ if [ -n "${repo_subcmd}" ]; then
+ env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts} >&3 2>&3 || fatal "Failed to update repository metadata." U000E
+ fi
+ fi
+ done
+
+ # shellcheck disable=SC2086
+ env ${env} ${pm_cmd} ${upgrade_subcmd} ${pkg_install_opts} netdata >&3 2>&3 || fatal "Failed to update Netdata package." U000F
+
+ if ${pkg_installed_check} systemd > /dev/null 2>&1; then
+ if [ "${NETDATA_NO_SYSTEMD_JOURNAL}" -eq 0 ]; then
+ if ! ${pkg_installed_check} netdata-plugin-systemd-journal > /dev/null 2>&1; then
+ env ${env} ${pm_cmd} ${install_subcmd} ${pkg_install_opts} netdata-plugin-systemd-journal >&3 2>&3
+
+ if [ -n "${mark_auto_cmd}" ]; then
+ # shellcheck disable=SC2086
+ env ${env} ${mark_auto_cmd} netdata-plugin-systemd-journal >&3 2>&3
+ fi
+ fi
+ fi
+ fi
+
+ [ -n "${logfile}" ] && rm "${logfile}" && logfile=
+ return 0
+}
+
+# Simple function to encapsulate original updater behavior.
+update_legacy() {
+ set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}"
+ case "${IS_NETDATA_STATIC_BINARY}" in
+ yes) update_static && exit 0 ;;
+ *) update_build && exit 0 ;;
+ esac
+}
+
+logfile=
+ndtmpdir=
+
+trap cleanup EXIT
+
+if [ -t 2 ] || [ "${GITHUB_ACTIONS}" ]; then
+ # we are running on a terminal or under CI
+ # open fd 3 and send it to stderr
+ exec 3>&2
+else
+ # we are headless
+ # create a temporary file for the log
+ logfile="$(mktemp -t netdata-updater.log.XXXXXX)"
+ # open fd 3 and send it to logfile
+ exec 3> "${logfile}"
+fi
+
+: "${ENVIRONMENT_FILE:=THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT}"
+
+if [ "${ENVIRONMENT_FILE}" = "THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT" ]; then
+ if [ -r "${script_dir}/../../../etc/netdata/.environment" ] || [ -r "${script_dir}/../../../etc/netdata/.install-type" ]; then
+ ENVIRONMENT_FILE="${script_dir}/../../../etc/netdata/.environment"
+ elif [ -r "/etc/netdata/.environment" ] || [ -r "/etc/netdata/.install-type" ]; then
+ ENVIRONMENT_FILE="/etc/netdata/.environment"
+ elif [ -r "/opt/netdata/etc/netdata/.environment" ] || [ -r "/opt/netdata/etc/netdata/.install-type" ]; then
+ ENVIRONMENT_FILE="/opt/netdata/etc/netdata/.environment"
+ else
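+ # Fall back to scanning the whole filesystem (skipping /sys, /proc and /dev) for a netdata .environment or .install-type file.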
+ envpath="$(find / -type d \( -path /sys -o -path /proc -o -path /dev \) -prune -false -o -path '*netdata/.environment' -type f 2> /dev/null | head -n 1)"
+ itpath="$(find / -type d \( -path /sys -o -path /proc -o -path /dev \) -prune -false -o -path '*netdata/.install-type' -type f 2> /dev/null | head -n 1)"
+ if [ -r "${envpath}" ]; then
+ ENVIRONMENT_FILE="${envpath}"
+ elif [ -r "${itpath}" ]; then
+ ENVIRONMENT_FILE="$(dirname "${itpath}")/.environment"
+ else
+ fatal "Cannot find environment file or install type file, unable to update." U0010
+ fi
+ fi
+fi
+
+if [ -r "${ENVIRONMENT_FILE}" ] ; then
+ # shellcheck source=/dev/null
+ . "${ENVIRONMENT_FILE}" || fatal "Failed to source ${ENVIRONMENT_FILE}" U0014
+fi
+
+if [ -r "$(dirname "${ENVIRONMENT_FILE}")/.install-type" ]; then
+ # shellcheck source=/dev/null
+ . "$(dirname "${ENVIRONMENT_FILE}")/.install-type" || fatal "Failed to source $(dirname "${ENVIRONMENT_FILE}")/.install-type" U0015
+fi
+
+if [ -r "$(dirname "${ENVIRONMENT_FILE}")/netdata-updater.conf" ]; then
+ # shellcheck source=/dev/null
+ . "$(dirname "${ENVIRONMENT_FILE}")/netdata-updater.conf"
+fi
+
+while [ -n "${1}" ]; do
+ case "${1}" in
+ --not-running-from-cron) NETDATA_NOT_RUNNING_FROM_CRON=1 ;;
+ --no-updater-self-update) NETDATA_NO_UPDATER_SELF_UPDATE=1 ;;
+ --force-update) NETDATA_FORCE_UPDATE=1 ;;
+ --non-interactive) INTERACTIVE=0 ;;
+ --interactive) INTERACTIVE=1 ;;
+ --tmpdir-path)
+ NETDATA_TMPDIR_PATH="${2}"
+ shift 1
+ ;;
+ --enable-auto-updates)
+ enable_netdata_updater "${2}"
+ exit $?
+ ;;
+ --disable-auto-updates)
+ disable_netdata_updater
+ exit $?
+ ;;
+ *) fatal "Unrecognized option ${1}" U001A ;;
+ esac
+
+ shift 1
+done
+
+# Random sleep to alleviate stampede effect of Agents upgrading
+# and disconnecting/reconnecting at the same time (or near to).
+# But only if we're not attached to a controlling terminal (tty)
+# Randomly sleep between 1s and 60m
+if [ ! -t 1 ] && \
+ [ -z "${GITHUB_ACTIONS}" ] && \
+ [ -z "${NETDATA_NOT_RUNNING_FROM_CRON}" ] && \
+ is_integer "${NETDATA_UPDATER_JITTER}" && \
+ [ "${NETDATA_UPDATER_JITTER}" -gt 1 ]; then
+ rnd="$(awk "
+ BEGIN { srand()
+ printf(\"%d\\n\", ${NETDATA_UPDATER_JITTER} * rand())
+ }")"
+ sleep $(((rnd % NETDATA_UPDATER_JITTER) + 1))
+fi
+
+# We don't expect to find the lib dir variable on older installations, so fall back to this path if none is found
+export NETDATA_LIB_DIR="${NETDATA_LIB_DIR:-${NETDATA_PREFIX}/var/lib/netdata}"
+
+# Source the tarball checksum, if not already available from environment (for existing installations with the old logic)
+[ -z "${NETDATA_TARBALL_CHECKSUM}" ] && [ -f "${NETDATA_LIB_DIR}/netdata.tarball.checksum" ] && NETDATA_TARBALL_CHECKSUM="$(cat "${NETDATA_LIB_DIR}/netdata.tarball.checksum")"
+
+if echo "$INSTALL_TYPE" | grep -qv ^binpkg && [ "${INSTALL_UID}" != "$(id -u)" ]; then
+ fatal "You are running this script as user with uid $(id -u). We recommend to run this script as root (user with uid 0)" U0011
+fi
+
+self_update
+
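+# Dispatch to the appropriate update path based on how this netdata instance was originally installed.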
+# shellcheck disable=SC2153
+case "${INSTALL_TYPE}" in
+ *-build)
+ validate_environment_file
+ set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}"
+ update_build && exit 0
+ ;;
+ *-static*)
+ validate_environment_file
+ set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}"
+ update_static && exit 0
+ ;;
+ *binpkg*) update_binpkg && exit 0 ;;
+ "") # Fallback case for no `.install-type` file. This just works like the old install type detection.
+ validate_environment_file
+ update_legacy
+ ;;
+ custom)
+ # At this point, we _should_ have a valid `.environment` file, but it's best to just check.
+ # If we do, then behave like the legacy updater.
+ if validate_environment_file && [ -n "${IS_NETDATA_STATIC_BINARY}" ]; then
+ update_legacy
+ else
+ fatal "This script does not support updating custom installations without valid environment files." U0012
+ fi
+ ;;
+ oci) fatal "This script does not support updating Netdata inside our official Docker containers, please instead update the container itself." U0013 ;;
+ *) fatal "Unrecognized installation type (${INSTALL_TYPE}), unable to update." U0014 ;;
+esac
diff --git a/packaging/jsonc.checksums b/packaging/jsonc.checksums
new file mode 100644
index 00000000..6005f455
--- /dev/null
+++ b/packaging/jsonc.checksums
@@ -0,0 +1 @@
+ec4eb70e0f6c0d707b9b1ec646cf7c860f4abb3562a90ea6e4d78d177fd95303 json-c-0.14-20200419.tar.gz
diff --git a/packaging/jsonc.version b/packaging/jsonc.version
new file mode 100644
index 00000000..29b56199
--- /dev/null
+++ b/packaging/jsonc.version
@@ -0,0 +1 @@
+0.14-20200419
diff --git a/packaging/libbpf_0_0_9.checksums b/packaging/libbpf_0_0_9.checksums
new file mode 100644
index 00000000..d4ff87a1
--- /dev/null
+++ b/packaging/libbpf_0_0_9.checksums
@@ -0,0 +1 @@
+fc33402ba33c8f8c5aa18afbb86a9932965886f2906c50e8f2110a1a2126e3ee v0.0.9_netdata-1.tar.gz
diff --git a/packaging/libbpf_0_0_9.version b/packaging/libbpf_0_0_9.version
new file mode 100644
index 00000000..d2362909
--- /dev/null
+++ b/packaging/libbpf_0_0_9.version
@@ -0,0 +1 @@
+0.0.9_netdata-1
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
new file mode 100644
index 00000000..3d759ecf
--- /dev/null
+++ b/packaging/maintainers/README.md
@@ -0,0 +1,80 @@
+# Package maintainers
+
+This page tracks the package maintainers for Netdata, for various operating systems and versions.
+
+> Feel free to update it, so that it reflects the current status.
+
+---
+
+## Official Linux Distributions
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) |
+| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) |
+| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) |
+| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) |
+| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) |
+| Ubuntu | | | |
+| Red Hat / Fedora / CentOS | | | |
+| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) |
+
+---
+
+## FreeBSD
+
+| System | Initial PR | Core Developer | Package Maintainer |
+|:-:|:-:|:-:|:-:|
+| FreeBSD | #1321 | @vlvkobal | @mmokhi |
+
+---
+
+## macOS
+
+| System | URL | Core Developer | Package Maintainer |
+|:-:|:-:|:-:|:-:|
+| macOS Homebrew Formula | [link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb) | @vlvkobal | @rickard-von-essen |
+
+---
+
+## Unofficial Linux Packages
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 |
+
+---
+
+## Embedded Linux
+
+| Embedded Linux | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 |
+| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) |
+| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata |
+| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 |
+| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi |
+
+---
+
+## Linux Containers
+
+| Containers | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Docker | Git | @titpetric | https://github.com/titpetric/netdata |
+
+---
+
+## Automation Systems
+
+| Automation Systems | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ |
+| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook |
+
+---
+
+## Packages summary from repology.org
+
+[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions)
+
+
diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md
new file mode 100644
index 00000000..1f2c746b
--- /dev/null
+++ b/packaging/makeself/README.md
@@ -0,0 +1,75 @@
+<!--
+title: "Netdata static binary build"
+description: "Users can build the static 64-bit binary package that we ship with every release of the open-source Netdata Agent for debugging or specialize purposes."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/makeself/README.md
+sidebar_label: "Static binary packages"
+learn_status: "Published"
+learn_rel_path: "Installation/Installation methods"
+sidebar_position: 30
+-->
+
+# Netdata static binary build
+
+We publish pre-built static builds of Netdata for Linux systems. Currently, these are published for 64-bit x86, ARMv7,
+AArch64, and POWER8+ hardware. These static builds are able to operate in a mostly self-contained manner and only
+require a POSIX-compliant shell and a supported init system. These static builds install under `/opt/netdata`. If
+you are on a platform for which we provide static builds but not native packages, a static build will be used by
+default for installation.
+
+If you want to enforce the usage of a static build and have the installer return a failure if one is not available,
+you can do so by adding `--static-only` to the options you pass to the installer.
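+
+For example, if you install with the one-line kickstart script, a minimal sketch would look like the following (the URL and download step shown here are assumptions based on the kickstart installation method documentation; adapt them to the exact invocation you already use):
+
+```bash
+# Download the kickstart installer and force a static build, failing if one is unavailable.
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh
+sh /tmp/netdata-kickstart.sh --static-only
+```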
+
+## Requirements
+
+- Container runtime tool (Docker or Podman)
+
+## Building a static binary package
+
+Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
+Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo).
+
+To build the static 64-bit binary distribution package, run the following from the root of the Netdata repo:
+
+```bash
+./packaging/makeself/build-static.sh x86_64
+```
+
+The program will:
+
+1. set up a new Docker container with Alpine Linux
+2. install the required Alpine packages (the build environment, needed libraries, etc.)
+3. download and compile the third-party apps that are bundled with Netdata (`bash`, `curl`, etc.)
+4. compile Netdata
+
+Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the Netdata binary package that can be run to install Netdata on any other computer.
+
+You can build static binaries for other architectures such as `armv7l`, `aarch64`, and `ppc64le`.
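+
+For example, to build for AArch64 instead, pass the target architecture as the first argument (on an x86_64 host the build script sets up QEMU user-mode emulation automatically unless `SKIP_EMULATION` is set):
+
+```bash
+# Produce a netdata-aarch64-*.gz.run static package using the same containerized build.
+./packaging/makeself/build-static.sh aarch64
+```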
+
+## Building binaries with debug info
+
+To build Netdata binaries with debugging / tracing information in them, use:
+
+```bash
+cd /path/to/netdata.git
+./packaging/makeself/build-static.sh x86_64 debug
+```
+
+These binaries are not optimized (so they are a bit slower), have certain features disabled (like log flood protection), have other features enabled (like `debug flags`), and are not stripped (the binary files are bigger, since they now include source code tracing information).
+
+## Debugging Netdata binaries
+
+Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start Netdata:
+
+```bash
+PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D
+```
+
+The above command will run Netdata under `valgrind`. While Netdata runs under `valgrind`, it will be about 10x slower and use a lot more memory.
+
+If Netdata crashes, `valgrind` will print a stack trace of the issue. Open a GitHub issue to let us know.
+
+To stop Netdata while it runs under `valgrind`, press Control-C on the console.
+
+> If you omit the `--undef-value-errors=no` parameter when running valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal: valgrind has heuristics to prevent it from printing such errors for system libraries, but for the static Netdata binary, all the required libraries are built into Netdata, so valgrind cannot apply its heuristics and prints them.
+>
diff --git a/packaging/makeself/build-static.sh b/packaging/makeself/build-static.sh
new file mode 100755
index 00000000..0c46c12a
--- /dev/null
+++ b/packaging/makeself/build-static.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/installer/functions.sh
+. "$(dirname "$0")"/../installer/functions.sh || exit 1
+
+BUILDARCH="${1}"
+
+set -e
+
+platform="$("$(dirname "${0}")/uname2platform.sh" "${BUILDARCH}")"
+
+if [ -z "${platform}" ]; then
+ exit 1
+fi
+
+if command -v docker > /dev/null 2>&1; then
+ docker="docker"
+elif command -v podman > /dev/null 2>&1; then
+ docker="podman"
+else
+ echo "Could not find a usable OCI runtime, need either Docker or Podman."
+ exit 1
+fi
+
+DOCKER_IMAGE_NAME="netdata/static-builder:v1"
+
+if [ "${BUILDARCH}" != "$(uname -m)" ] && [ "$(uname -m)" = 'x86_64' ] && [ -z "${SKIP_EMULATION}" ]; then
+ ${docker} run --rm --privileged multiarch/qemu-user-static --reset -p yes || exit 1
+fi
+
+if ${docker} inspect "${DOCKER_IMAGE_NAME}" > /dev/null 2>&1; then
+ if ${docker} image inspect "${DOCKER_IMAGE_NAME}" | grep -q 'Variant'; then
+ img_platform="$(${docker} image inspect "${DOCKER_IMAGE_NAME}" --format '{{.Os}}/{{.Architecture}}/{{.Variant}}')"
+ else
+ img_platform="$(${docker} image inspect "${DOCKER_IMAGE_NAME}" --format '{{.Os}}/{{.Architecture}}')"
+ fi
+
+ if [ "${img_platform}" != "${platform}" ]; then
+ ${docker} image rm "${DOCKER_IMAGE_NAME}" || exit 1
+ fi
+fi
+
+if ! ${docker} inspect "${DOCKER_IMAGE_NAME}" > /dev/null 2>&1; then
+ ${docker} pull --platform "${platform}" "${DOCKER_IMAGE_NAME}"
+fi
+
+# Run the build script inside the container
+if [ -t 1 ]; then
+ run ${docker} run --rm -e BUILDARCH="${BUILDARCH}" -a stdin -a stdout -a stderr -i -t -v "$(pwd)":/netdata:rw \
+ "${DOCKER_IMAGE_NAME}" \
+ /bin/sh /netdata/packaging/makeself/build.sh "${@}"
+else
+ run ${docker} run --rm -e BUILDARCH="${BUILDARCH}" -v "$(pwd)":/netdata:rw \
+ -e GITHUB_ACTIONS="${GITHUB_ACTIONS}" "${DOCKER_IMAGE_NAME}" \
+ /bin/sh /netdata/packaging/makeself/build.sh "${@}"
+fi
diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh
new file mode 100755
index 00000000..59d2078f
--- /dev/null
+++ b/packaging/makeself/build-x86_64-static.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+"${SCRIPT_DIR}/build-static.sh" x86_64 "${@}"
diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh
new file mode 100755
index 00000000..3ac600ed
--- /dev/null
+++ b/packaging/makeself/build.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+# parse command line arguments
+
+set -e
+
+export NETDATA_BUILD_WITH_DEBUG=0
+
+while [ -n "${1}" ]; do
+ case "${1}" in
+ debug)
+ export NETDATA_BUILD_WITH_DEBUG=1
+ ;;
+
+ *) ;;
+
+ esac
+
+ shift
+done
+
+# -----------------------------------------------------------------------------
+
+# First run install-alpine-packages.sh under Alpine Linux to install
+# the required packages. build-x86_64-static.sh will do this for you
+# using Docker.
+
+mkdir -p /usr/src
+cp -va /netdata /usr/src/netdata
+chown -R root:root /usr/src/netdata
+
+cd /usr/src/netdata/packaging/makeself || exit 1
+
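+# Start from a pristine source tree so stale build artifacts cannot leak into the static bundle.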
+git clean -dxf
+git submodule foreach --recursive git clean -dxf
+
+cat >&2 << EOF
+This program will create a self-extracting shell package containing
+a statically linked netdata, able to run on any 64-bit Linux system,
+without any dependencies from the target system.
+
+It can be used to get netdata running in no time, or in cases where
+the target Linux system cannot compile netdata.
+EOF
+
+if [ ! -d tmp ]; then
+ mkdir tmp || exit 1
+else
+ rm -rf tmp/*
+fi
+
+if [ -z "${GITHUB_ACTIONS}" ]; then
+ export GITHUB_ACTIONS=false
+fi
+
+if ! ./run-all-jobs.sh "$@"; then
+ printf >&2 "Build failed."
+ exit 1
+fi
+
+mkdir -p /netdata/artifacts
+cp -va /usr/src/netdata/artifacts/* /netdata/artifacts/
+chown -R "$(stat -c '%u:%g' /netdata)" /netdata/artifacts/
diff --git a/packaging/makeself/bundled-packages b/packaging/makeself/bundled-packages
new file mode 100644
index 00000000..02ee4469
--- /dev/null
+++ b/packaging/makeself/bundled-packages
@@ -0,0 +1,16 @@
+# Source of truth for all the packages we bundle in static builds
+PACKAGES=("OPENSSL" "CURL" "BASH" "IOPING" "LIBNETFILTER_ACT")
+SOURCE_TYPES=("GH_REPO_CLONE" "GH_REPO_CLONE" "DW_TARBALL" "GH_REPO_SOURCE" "DW_TARBALL")
+OPENSSL_VERSION="openssl-3.1.4"
+OPENSSL_SOURCE="https://github.com/openssl/openssl"
+CURL_VERSION="curl-8_4_0"
+CURL_SOURCE="https://github.com/curl/curl"
+BASH_VERSION="5.1.16"
+BASH_ARTIFACT_SOURCE="http://ftp.gnu.org/gnu/bash"
+BASH_ARTIFACT_SHA256="5bac17218d3911834520dad13cd1f85ab944e1c09ae1aba55906be1f8192f558"
+IOPING_VERSION="1.3"
+IOPING_SOURCE="https://github.com/koct9i/ioping"
+IOPING_ARTIFACT_SHA256="7aa48e70aaa766bc112dea57ebbe56700626871052380709df3a26f46766e8c8"
+LIBNETFILTER_ACT_VERSION="1.0.3"
+LIBNETFILTER_ACT_SOURCE="https://www.netfilter.org/projects/libnetfilter_acct/files"
+LIBNETFILTER_ACT_ARTIFACT_SHA256="4250ceef3efe2034f4ac05906c3ee427db31b9b0a2df41b2744f4bf79a959a1a"
diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh
new file mode 100755
index 00000000..c3289c7c
--- /dev/null
+++ b/packaging/makeself/functions.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+
+# allow running the jobs by hand
+[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0
+[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+[ -z "${NETDATA_MAKESELF_PATH}" ] && NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.."
+[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+export NETDATA_MAKESELF_PATH
+export NULL=
+
+# make sure the path does not end with /
+if [ "${NETDATA_INSTALL_PATH:$((${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ]; then
+ export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$((${#NETDATA_INSTALL_PATH} - 1))}"
+fi
+
+# find the parent directory
+NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")"
+export NETDATA_INSTALL_PARENT
+
+# -----------------------------------------------------------------------------
+
+# bash strict mode
+set -euo pipefail
+
+# -----------------------------------------------------------------------------
+
+fetch() {
+ local dir="${1}" url="${2}" sha256="${3}" key="${4}"
+ local tar
+ tar="$(basename "${2}")"
+ local cache="${NETDATA_SOURCE_PATH}/artifacts/cache/${BUILDARCH}/${key}"
+
+ if [ -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]; then
+ rm -rf "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
+ fi
+
+ if [ -d "${cache}/${dir}" ]; then
+ echo "Found cached copy of build directory for ${key}, using it."
+ cp -a "${cache}/${dir}" "${NETDATA_MAKESELF_PATH}/tmp/"
+ CACHE_HIT=1
+ else
+ echo "No cached copy of build directory for ${key} found, fetching sources instead."
+
+ if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]; then
+ run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}"
+ fi
+
+ # Check the SHA256 of the gzip'd tar file (apparently Alpine's sha256sum requires
+ # two spaces between the checksum and the file's path)
+ set +e
+ echo "${sha256} ${NETDATA_MAKESELF_PATH}/tmp/${tar}" | sha256sum -c -s
+ local rc=$?
+ if [ ${rc} -ne 0 ]; then
+ echo >&2 "SHA256 verification of tar file ${tar} failed (rc=${rc})"
+ echo >&2 "expected: ${sha256}, got $(sha256sum "${NETDATA_MAKESELF_PATH}/tmp/${tar}")"
+ exit 1
+ fi
+
+ set -e
+ cd "${NETDATA_MAKESELF_PATH}/tmp"
+ run tar -axpf "${tar}"
+ cd -
+
+ CACHE_HIT=0
+ fi
+
+ run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
+}
+
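+# store_cache: save a freshly built source/build directory into the per-architecture artifact cache (skipped when the build reused a cached copy).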
+store_cache() {
+ key="${1}"
+ src="${2}"
+
+ cache="${NETDATA_SOURCE_PATH}/artifacts/cache/${BUILDARCH}/${key}"
+
+ if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ if [ -d "${cache}" ]; then
+ rm -rf "${cache}"
+ fi
+
+ mkdir -p "${cache}"
+
+ cp -a "${src}" "${cache}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+# load the functions of the netdata-installer.sh
+# shellcheck source=packaging/installer/functions.sh
+. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh"
+
+# -----------------------------------------------------------------------------
+
+# debug
+echo "ME=${0}"
+echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}"
+echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}"
+echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}"
+echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}"
+echo "PROCESSORS=$(nproc)"
diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh
new file mode 100755
index 00000000..d7974339
--- /dev/null
+++ b/packaging/makeself/install-alpine-packages.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env sh
+#
+# Installation script for the alpine host
+# to prepare the static binary
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
+
+apk update || exit 1
+apk upgrade || exit 1
+
+# Add required APK packages
+apk add --no-cache -U \
+ alpine-sdk \
+ autoconf \
+ automake \
+ bash \
+ binutils \
+ cmake \
+ curl \
+ elfutils-dev \
+ gcc \
+ git \
+ gnutls-dev \
+ gzip \
+ jq \
+ libelf-static \
+ libmnl-dev \
+ libnetfilter_acct-dev \
+ libtool \
+ libuv-dev \
+ libuv-static \
+ lz4-dev \
+ lz4-static \
+ make \
+ ncurses \
+ netcat-openbsd \
+ openssh \
+ pkgconfig \
+ protobuf-dev \
+ snappy-dev \
+ snappy-static \
+ util-linux-dev \
+ wget \
+ xz \
+ zlib-dev \
+ zlib-static ||
+ exit 1
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
new file mode 100755
index 00000000..e4c13345
--- /dev/null
+++ b/packaging/makeself/install-or-update.sh
@@ -0,0 +1,248 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/makeself/functions.sh
+. "$(dirname "${0}")"/functions.sh
+
+export LC_ALL=C
+umask 002
+
+# Be nice on production environments
+renice 19 $$ > /dev/null 2> /dev/null
+
+NETDATA_PREFIX="/opt/netdata"
+NETDATA_USER_CONFIG_DIR="${NETDATA_PREFIX}/etc/netdata"
+
+# -----------------------------------------------------------------------------
+if [ -d /opt/netdata/etc/netdata.old ]; then
+ progress "Found old etc/netdata directory, reinstating this"
+ [ -d /opt/netdata/etc/netdata.new ] && rm -rf /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata.old /opt/netdata/etc/netdata
+
+ progress "Trigger stock config clean up"
+ rm -f /opt/netdata/etc/netdata/.installer-cleanup-of-stock-configs-done
+fi
+
+STARTIT=1
+REINSTALL_OPTIONS=""
+RELEASE_CHANNEL="nightly"
+
+while [ "${1}" ]; do
+ case "${1}" in
+ "--dont-start-it")
+ STARTIT=0
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--auto-update" | "-u") ;;
+ "--stable-channel")
+ RELEASE_CHANNEL="stable"
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--nightly-channel")
+ RELEASE_CHANNEL="nightly"
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--disable-telemetry")
+ NETDATA_DISABLE_TELEMETRY=1
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+
+ *) echo >&2 "Unknown option '${1}'. Ignoring it." ;;
+ esac
+ shift 1
+done
+
+if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
+ [ -n "$DISABLE_TELEMETRY" ] ||
+ [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] ||
+ [ -n "$DO_NOT_TRACK" ]; then
+ NETDATA_DISABLE_TELEMETRY=1
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --disable-telemetry"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Attempt to create user/group netdata/netadata"
+
+NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody I2C"
+NETDATA_ADDED_TO_GROUPS=""
+# Default user/group
+NETDATA_USER="root"
+NETDATA_GROUP="root"
+
+if portable_add_group netdata; then
+ if portable_add_user netdata "/opt/netdata"; then
+ progress "Add user netdata to required user groups"
+ for g in ${NETDATA_WANTED_GROUPS}; do
+ # shellcheck disable=SC2086
+ if portable_add_user_to_group ${g} netdata; then
+ NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}"
+ else
+ run_failed "Failed to add netdata user to secondary groups"
+ fi
+ done
+ # Netdata must be able to read /etc/pve/qemu-server/* and /etc/pve/lxc/*
+ # for reading VMs/containers names, CPU and memory limits on Proxmox.
+ if [ -d "/etc/pve" ]; then
+ portable_add_user_to_group "www-data" netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} www-data"
+ fi
+ NETDATA_USER="netdata"
+ NETDATA_GROUP="netdata"
+ else
+ run_failed "I could not add user netdata, will be using root"
+ fi
+else
+ run_failed "I could not add group netdata, so no user netdata will be created as well. Netdata run as root:root"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install logrotate configuration for netdata"
+
+install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
+
+# -----------------------------------------------------------------------------
+progress "Telemetry configuration"
+
+# Opt-out from telemetry program
+if [ -n "${NETDATA_DISABLE_TELEMETRY}" ]; then
+ run touch "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+else
+ printf "You can opt out from anonymous statistics via the --disable-telemetry option, or by creating an empty file %s \n\n" "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install netdata at system init"
+
+install_netdata_service || run_failed "Cannot install netdata init service."
+
+set_netdata_updater_channel || run_failed "Cannot set netdata updater tool release channel to '${RELEASE_CHANNEL}'"
+
+# -----------------------------------------------------------------------------
+progress "Install (but not enable) netdata updater tool"
+install_netdata_updater || run_failed "Cannot install netdata updater tool."
+
+# -----------------------------------------------------------------------------
+progress "creating quick links"
+
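+# Replace a directory entry with a symlink to the canonical location, keeping any existing real directory as "<name>.old.$$".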
+dir_should_be_link() {
+ local p="${1}" t="${2}" d="${3}" old
+
+ old="${PWD}"
+ cd "${p}" || return 0
+
+ if [ -e "${d}" ]; then
+ if [ -h "${d}" ]; then
+ run rm "${d}"
+ else
+ run mv -f "${d}" "${d}.old.$$"
+ fi
+ fi
+
+ run ln -s "${t}" "${d}"
+ cd "${old}" || true
+}
+
+dir_should_be_link . bin sbin
+dir_should_be_link usr ../bin bin
+dir_should_be_link usr ../bin sbin
+dir_should_be_link usr . local
+
+dir_should_be_link . etc/netdata netdata-configs
+dir_should_be_link . usr/share/netdata/web netdata-web-files
+dir_should_be_link . usr/libexec/netdata netdata-plugins
+dir_should_be_link . var/lib/netdata netdata-dbs
+dir_should_be_link . var/cache/netdata netdata-metrics
+dir_should_be_link . var/log/netdata netdata-logs
+
+dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig
+
+# -----------------------------------------------------------------------------
+progress "fix permissions"
+
+run chmod g+rx,o+rx /opt
+run find /opt/netdata -type d -exec chmod go+rx '{}' \+
+run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata/var
+
+if [ -d /opt/netdata/usr/libexec/netdata/plugins.d/ebpf.d ]; then
+ run chown -R root:${NETDATA_GROUP} /opt/netdata/usr/libexec/netdata/plugins.d/ebpf.d
+fi
+
+# -----------------------------------------------------------------------------
+
+progress "changing plugins ownership and permissions"
+
+for x in apps.plugin perf.plugin slabinfo.plugin debugfs.plugin freeipmi.plugin ioping cgroup-network local-listeners ebpf.plugin nfacct.plugin xenstat.plugin python.d.plugin charts.d.plugin go.d.plugin ioping.plugin cgroup-network-helper.sh; do
+ f="usr/libexec/netdata/plugins.d/${x}"
+ if [ -f "${f}" ]; then
+ run chown root:${NETDATA_GROUP} "${f}"
+ fi
+done
+
+if command -v setcap >/dev/null 2>&1; then
+ run setcap "cap_dac_read_search,cap_sys_ptrace=ep" "usr/libexec/netdata/plugins.d/apps.plugin"
+ run setcap "cap_dac_read_search=ep" "usr/libexec/netdata/plugins.d/slabinfo.plugin"
+ run setcap "cap_dac_read_search=ep" "usr/libexec/netdata/plugins.d/debugfs.plugin"
+
+ if command -v capsh >/dev/null 2>&1 && capsh --supports=cap_perfmon 2>/dev/null ; then
+ run setcap "cap_perfmon=ep" "usr/libexec/netdata/plugins.d/perf.plugin"
+ else
+ run setcap "cap_sys_admin=ep" "usr/libexec/netdata/plugins.d/perf.plugin"
+ fi
+
+ run setcap "cap_net_admin,cap_net_raw=eip" "usr/libexec/netdata/plugins.d/go.d.plugin"
+else
+ for x in apps.plugin perf.plugin slabinfo.plugin debugfs.plugin; do
+ f="usr/libexec/netdata/plugins.d/${x}"
+ run chmod 4750 "${f}"
+ done
+fi
+
+for x in freeipmi.plugin ioping cgroup-network local-listeners ebpf.plugin nfacct.plugin xenstat.plugin; do
+ f="usr/libexec/netdata/plugins.d/${x}"
+
+ if [ -f "${f}" ]; then
+ run chmod 4750 "${f}"
+ fi
+done
+
+# -----------------------------------------------------------------------------
+
+echo "Configure TLS certificate paths"
+if [ ! -L /opt/netdata/etc/ssl ] && [ -d /opt/netdata/etc/ssl ] ; then
+ echo "Preserving existing user configuration for TLS"
+else
+ if [ -d /etc/pki/tls ] ; then
+ echo "Using /etc/pki/tls for TLS configuration and certificates"
+ ln -sf /etc/pki/tls /opt/netdata/etc/ssl
+ elif [ -d /etc/ssl ] ; then
+ echo "Using /etc/ssl for TLS configuration and certificates"
+ ln -sf /etc/ssl /opt/netdata/etc/ssl
+ else
+ echo "Using bundled TLS configuration and certificates"
+ ln -sf /opt/netdata/share/ssl /opt/netdata/etc/ssl
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+echo "Save install options"
+grep -qv 'IS_NETDATA_STATIC_BINARY="yes"' "${NETDATA_PREFIX}/etc/netdata/.environment" || echo IS_NETDATA_STATIC_BINARY=\"yes\" >> "${NETDATA_PREFIX}/etc/netdata/.environment"
+sed -i "s/REINSTALL_OPTIONS=\".*\"/REINSTALL_OPTIONS=\"${REINSTALL_OPTIONS}\"/" "${NETDATA_PREFIX}/etc/netdata/.environment"
+
+# -----------------------------------------------------------------------------
+if [ ${STARTIT} -eq 0 ]; then
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+ netdata_banner "is installed now!"
+else
+ progress "starting netdata"
+
+ if ! restart_netdata "${NETDATA_PREFIX}/bin/netdata"; then
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+ netdata_banner "is installed and running now!"
+ else
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf"
+ netdata_banner "is installed now!"
+ fi
+fi
+run chmod 0644 "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh
new file mode 100755
index 00000000..4686841b
--- /dev/null
+++ b/packaging/makeself/jobs/10-prepare-destination.install.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Preparing build environment" || true
+
+[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old"
+[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old"
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin"
+run mkdir -p "${NETDATA_INSTALL_PATH}/usr"
+run cd "${NETDATA_INSTALL_PATH}" || exit 1
+run ln -s bin sbin
+run cd "${NETDATA_INSTALL_PATH}/usr" || exit 1
+run ln -s ../bin bin
+run ln -s ../sbin sbin
+run ln -s . local
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/20-openssl.install.sh b/packaging/makeself/jobs/20-openssl.install.sh
new file mode 100755
index 00000000..1158a633
--- /dev/null
+++ b/packaging/makeself/jobs/20-openssl.install.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+# Source of truth for all the packages we bundle in static builds
+. "$(dirname "${0}")/../bundled-packages"
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building OpenSSL" || true
+
+export CFLAGS='-fno-lto -pipe'
+export LDFLAGS='-static'
+export PKG_CONFIG="pkg-config --static"
+
+if [ -d "${NETDATA_MAKESELF_PATH}/tmp/openssl" ]; then
+ rm -rf "${NETDATA_MAKESELF_PATH}/tmp/openssl"
+fi
+
+if [ -d "${NETDATA_MAKESELF_PATH}/tmp/openssl" ]; then
+ rm -rf "${NETDATA_MAKESELF_PATH}/tmp/openssl"
+fi
+
+cache="${NETDATA_SOURCE_PATH}/artifacts/cache/${BUILDARCH}/openssl"
+
+if [ -d "${cache}" ]; then
+ echo "Found cached copy of build directory for openssl, using it."
+ cp -a "${cache}/openssl" "${NETDATA_MAKESELF_PATH}/tmp/"
+ CACHE_HIT=1
+else
+ echo "No cached copy of build directory for openssl found, fetching sources instead."
+ run git clone --branch "${OPENSSL_VERSION}" --single-branch --depth 1 "${OPENSSL_SOURCE}" "${NETDATA_MAKESELF_PATH}/tmp/openssl"
+ CACHE_HIT=0
+fi
+
+cd "${NETDATA_MAKESELF_PATH}/tmp/openssl" || exit 1
+
+if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ sed -i "s/disable('static', 'pic', 'threads');/disable('static', 'pic');/" Configure
+ run ./config -static threads no-tests --prefix=/openssl-static --openssldir=/opt/netdata/etc/ssl
+ run make -j "$(nproc)"
+fi
+
+run make -j "$(nproc)" install_sw
+
+if [ -d "/openssl-static/lib" ]; then
+ cd "/openssl-static" || exit 1
+ ln -s "lib" "lib64" || true
+ cd - || exit 1
+fi
+
+store_cache openssl "${NETDATA_MAKESELF_PATH}/tmp/openssl"
+
+perl configdata.pm --dump
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/50-bash-5.1.16.install.sh b/packaging/makeself/jobs/50-bash-5.1.16.install.sh
new file mode 100755
index 00000000..7a302f2e
--- /dev/null
+++ b/packaging/makeself/jobs/50-bash-5.1.16.install.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+# Source of truth for all the packages we bundle in static builds
+. "$(dirname "${0}")/../bundled-packages"
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::building bash" || true
+
+fetch "bash-${BASH_VERSION}" "${BASH_ARTIFACT_SOURCE}/bash-${BASH_VERSION}.tar.gz" \
+ "${BASH_ARTIFACT_SHA256}" bash
+
+export CFLAGS="-pipe"
+export PKG_CONFIG_PATH="/openssl-static/lib64/pkgconfig"
+
+if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ run ./configure \
+ --prefix="${NETDATA_INSTALL_PATH}" \
+ --without-bash-malloc \
+ --enable-static-link \
+ --enable-net-redirections \
+ --enable-array-variables \
+ --disable-progcomp \
+ --disable-profiling \
+ --disable-nls \
+ --disable-dependency-tracking
+
+ run make clean
+ run make -j "$(nproc)"
+
+ cat > examples/loadables/Makefile <<-EOF
+ all:
+ clean:
+ install:
+ EOF
+fi
+
+run make install
+
+store_cache bash "${NETDATA_MAKESELF_PATH}/tmp/bash-${BASH_VERSION}"
+
+if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/bash
+fi
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/50-curl.install.sh b/packaging/makeself/jobs/50-curl.install.sh
new file mode 100755
index 00000000..824b3056
--- /dev/null
+++ b/packaging/makeself/jobs/50-curl.install.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+# Source of truth for all the packages we bundle in static builds
+. "$(dirname "${0}")/../bundled-packages"
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building cURL" || true
+
+if [ -d "${NETDATA_MAKESELF_PATH}/tmp/curl" ]; then
+ rm -rf "${NETDATA_MAKESELF_PATH}/tmp/curl"
+fi
+
+cache="${NETDATA_SOURCE_PATH}/artifacts/cache/${BUILDARCH}/curl"
+
+if [ -d "${cache}" ]; then
+ echo "Found cached copy of build directory for curl, using it."
+ cp -a "${cache}/curl" "${NETDATA_MAKESELF_PATH}/tmp/"
+ CACHE_HIT=1
+else
+ echo "No cached copy of build directory for curl found, fetching sources instead."
+ run git clone --branch "${CURL_VERSION}" --single-branch --depth 1 "${CURL_SOURCE}" "${NETDATA_MAKESELF_PATH}/tmp/curl"
+ CACHE_HIT=0
+fi
+
+cd "${NETDATA_MAKESELF_PATH}/tmp/curl" || exit 1
+
+export CFLAGS="-I/openssl-static/include -pipe"
+export LDFLAGS="-static -L/openssl-static/lib64"
+export PKG_CONFIG="pkg-config --static"
+export PKG_CONFIG_PATH="/openssl-static/lib64/pkgconfig"
+
+if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ run autoreconf -fi
+
+ run ./configure \
+ --prefix="/curl-local" \
+ --enable-optimize \
+ --disable-shared \
+ --enable-static \
+ --enable-http \
+ --disable-ldap \
+ --disable-ldaps \
+ --enable-proxy \
+ --disable-dict \
+ --disable-telnet \
+ --disable-tftp \
+ --disable-pop3 \
+ --disable-imap \
+ --disable-smb \
+ --disable-smtp \
+ --disable-gopher \
+ --enable-ipv6 \
+ --enable-cookies \
+ --with-ca-fallback \
+ --with-openssl \
+ --disable-dependency-tracking
+
+ # Curl autoconf does not honour the curl_LDFLAGS environment variable
+ run sed -i -e "s/LDFLAGS =/LDFLAGS = -all-static/" src/Makefile
+
+ run make clean
+ run make -j "$(nproc)"
+fi
+
+run make install
+
+store_cache curl "${NETDATA_MAKESELF_PATH}/tmp/curl"
+
+cp /curl-local/bin/curl "${NETDATA_INSTALL_PATH}"/bin/curl
+if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/curl
+fi
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Preparing build environment" || true
diff --git a/packaging/makeself/jobs/50-ioping-1.3.install.sh b/packaging/makeself/jobs/50-ioping-1.3.install.sh
new file mode 100755
index 00000000..6bd538e3
--- /dev/null
+++ b/packaging/makeself/jobs/50-ioping-1.3.install.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+# Source of truth for all the packages we bundle in static builds
+. "$(dirname "${0}")/../bundled-packages" || exit 1
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building ioping" || true
+
+fetch "ioping-${IOPING_VERSION}" "${IOPING_SOURCE}/archive/refs/tags/v${IOPING_VERSION}.tar.gz" \
+ "${IOPING_ARTIFACT_SHA256}" ioping
+
+export CFLAGS="-static -pipe"
+
+if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ run make clean
+ run make -j "$(nproc)"
+fi
+
+run mkdir -p "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/
+run install -o root -g root -m 4750 ioping "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/
+
+store_cache ioping "${NETDATA_MAKESELF_PATH}/tmp/ioping-${IOPING_VERSION}"
+
+if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/ioping
+fi
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh b/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh
new file mode 100755
index 00000000..82975217
--- /dev/null
+++ b/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Install libnetfilter_acct and its dependency, libmnl
+
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+# Source of truth for all the packages we bundle in static builds
+. "$(dirname "${0}")/../bundled-packages" || exit 1
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::building libnetfilter_acct" || true
+
+export CFLAGS="-static -I/usr/include/libmnl -pipe"
+export LDFLAGS="-static -L/usr/lib -lmnl"
+export PKG_CONFIG="pkg-config --static"
+export PKG_CONFIG_PATH="/usr/lib/pkgconfig"
+
+fetch "libnetfilter_acct-${LIBNETFILTER_ACT_VERSION}" "${LIBNETFILTER_ACT_SOURCE}/libnetfilter_acct-${LIBNETFILTER_ACT_VERSION}.tar.bz2" \
+ "${LIBNETFILTER_ACT_ARTIFACT_SHA256}" libnetfilter_acct
+
+
+if [ "${CACHE_HIT:-0}" -eq 0 ]; then
+ run ./configure \
+ --prefix="/libnetfilter-acct-static" \
+ --exec-prefix="/libnetfilter-acct-static"
+
+ run make clean
+ run make -j "$(nproc)"
+fi
+
+run make install
+
+store_cache libnetfilter_acct "${NETDATA_MAKESELF_PATH}/tmp/libnetfilter_acct-${LIBNETFILTER_ACT_VERSION}"
+
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
new file mode 100755
index 00000000..83d28bf8
--- /dev/null
+++ b/packaging/makeself/jobs/70-netdata-git.install.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/makeself/functions.sh
+. "${NETDATA_MAKESELF_PATH}"/functions.sh "${@}" || exit 1
+
+cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then
+ export CFLAGS="-ffunction-sections -fdata-sections -static -O2 -funroll-loops -I/openssl-static/include -I/libnetfilter-acct-static/include/libnetfilter_acct -I/curl-local/include/curl -I/usr/include/libmnl -pipe"
+else
+ export CFLAGS="-static -O1 -pipe -ggdb -Wall -Wextra -Wformat-signedness -DNETDATA_INTERNAL_CHECKS=1 -I/openssl-static/include -I/libnetfilter-acct-static/include/libnetfilter_acct -I/curl-local/include/curl -I/usr/include/libmnl"
+fi
+
+export LDFLAGS="-Wl,--gc-sections -static -L/openssl-static/lib64 -L/libnetfilter-acct-static/lib -lnetfilter_acct -L/usr/lib -lmnl -L/usr/lib -lzstd -L/curl-local/lib"
+
+# We export this as 'yes'; the installer writes it into .environment.
+# The updater consumes it, so that it can tell whether it should update a static install or a non-static one.
+export IS_NETDATA_STATIC_BINARY="yes"
+
+# Set eBPF LIBC to "static" to bundle the `-static` variant of the kernel-collector
+export EBPF_LIBC="static"
+export PKG_CONFIG="pkg-config --static"
+export PKG_CONFIG_PATH="/openssl-static/lib64/pkgconfig:/libnetfilter-acct-static/lib/pkgconfig:/usr/lib/pkgconfig:/curl-local/lib/pkgconfig"
+
+# Set correct CMake flags for building against non-System OpenSSL
+# See: https://github.com/warmcat/libwebsockets/blob/master/READMEs/README.build.md
+export CMAKE_FLAGS="-DOPENSSL_ROOT_DIR=/openssl-static -DOPENSSL_LIBRARIES=/openssl-static/lib64 -DCMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE=/openssl-static -DLWS_OPENSSL_INCLUDE_DIRS=/openssl-static/include -DLWS_OPENSSL_LIBRARIES=/openssl-static/lib64/libssl.a;/openssl-static/lib64/libcrypto.a"
+
+run ./netdata-installer.sh \
+ --install-prefix "${NETDATA_INSTALL_PARENT}" \
+ --dont-wait \
+ --dont-start-it \
+ --disable-exporting-mongodb \
+ --require-cloud \
+ --use-system-protobuf \
+ --dont-scrub-cflags-even-though-it-may-break-things \
+ --one-time-build \
+ --enable-lto
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Finishing netdata install" || true
+
+# Properly mark the install type
+cat > "${NETDATA_INSTALL_PATH}/etc/netdata/.install-type" <<-EOF
+ INSTALL_TYPE='manual-static'
+ PREBUILT_ARCH='${BUILDARCH}'
+ EOF
+
+# Remove the netdata.conf file from the tree; Netdata ships with sensible hard-coded defaults built in.
+run rm -f "${NETDATA_INSTALL_PATH}/etc/netdata/netdata.conf"
+
+# Ensure the netdata binary is in fact statically linked
+if run readelf -l "${NETDATA_INSTALL_PATH}"/bin/netdata | grep 'INTERP'; then
+ printf >&2 "Ooops. %s is not a statically linked binary!\n" "${NETDATA_INSTALL_PATH}"/bin/netdata
+ ldd "${NETDATA_INSTALL_PATH}"/bin/netdata
+ exit 1
+fi
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
diff --git a/packaging/makeself/jobs/90-netdata-runtime-check.sh b/packaging/makeself/jobs/90-netdata-runtime-check.sh
new file mode 100755
index 00000000..a3c94ffc
--- /dev/null
+++ b/packaging/makeself/jobs/90-netdata-runtime-check.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/makeself/functions.sh
+. "${NETDATA_MAKESELF_PATH}"/functions.sh "${@}" || exit 1
+
+dump_log() {
+ cat ./netdata.log
+}
+
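+# Poll the given host:port with netcat until it accepts connections or the timeout (in seconds) expires.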
+wait_for() {
+ host="${1}"
+ port="${2}"
+ name="${3}"
+ timeout="30"
+
+ if command -v nc > /dev/null ; then
+ netcat="nc"
+ elif command -v netcat > /dev/null ; then
+ netcat="netcat"
+ else
+ printf "Unable to find a usable netcat command.\n"
+ return 1
+ fi
+
+ printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}"
+
+ sleep 30
+
+ i=0
+ while ! ${netcat} -z "${host}" "${port}"; do
+ sleep 1
+ if [ "$i" -gt "$timeout" ]; then
+ printf "Timed out!\n"
+ return 1
+ fi
+ i="$((i + 1))"
+ done
+ printf "OK\n"
+}
+
+trap dump_log EXIT
+
+"${NETDATA_INSTALL_PATH}/bin/netdata" -D > ./netdata.log 2>&1 &
+
+wait_for localhost 19999 netdata || exit 1
+
+curl -sS http://127.0.0.1:19999/api/v1/info > ./response || exit 1
+
+cat ./response
+
+jq '.version' ./response || exit 1
+
+trap - EXIT
diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh
new file mode 100755
index 00000000..2695e8eb
--- /dev/null
+++ b/packaging/makeself/jobs/99-makeself.install.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building self-extracting archive" || true
+
+run cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+# -----------------------------------------------------------------------------
+# find the netdata version
+
+VERSION="$("${NETDATA_INSTALL_PARENT}/netdata/bin/netdata" -v | cut -f 2 -d ' ')"
+
+if [ "${VERSION}" == "" ]; then
+ echo >&2 "Cannot find version number. Create makeself executable from source code with git tree structure."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# copy the files needed by makeself installation
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/system"
+
+run cp \
+ packaging/makeself/post-installer.sh \
+ packaging/makeself/install-or-update.sh \
+ packaging/installer/functions.sh \
+ "${NETDATA_INSTALL_PATH}/system/"
+
+# -----------------------------------------------------------------------------
+# create a wrapper to start our netdata with a modified path
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv"
+
+run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \
+ "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1
+
+cat > "${NETDATA_INSTALL_PATH}/bin/netdata" << EOF
+#!${NETDATA_INSTALL_PATH}/bin/bash
+export NETDATA_BASH_LOADABLES="DISABLE"
+export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}"
+exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}"
+EOF
+run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata"
+
+# -----------------------------------------------------------------------------
+# the claiming script must be in the same directory as the netdata binary for web-based claiming to work
+
+run ln -s "${NETDATA_INSTALL_PATH}/bin/netdata-claim.sh" \
+ "${NETDATA_INSTALL_PATH}/bin/srv/netdata-claim.sh" || exit 1
+
+# -----------------------------------------------------------------------------
+# copy the SSL/TLS configuration and certificates from the build system
+
+run cp -a /etc/ssl "${NETDATA_INSTALL_PATH}/share/ssl"
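+# Bundling the build system's CA store gives the statically linked binaries a certificate bundle
+# for TLS verification, regardless of where the host keeps its own certificates.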
+
+# -----------------------------------------------------------------------------
+# remove the links to allow untaring the archive
+
+run rm "${NETDATA_INSTALL_PATH}/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/bin" \
+ "${NETDATA_INSTALL_PATH}/usr/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/local"
+
+# -----------------------------------------------------------------------------
+# ensure required directories actually exist
+
+for dir in var/lib/netdata var/cache/netdata var/log/netdata ; do
+ run mkdir -p "${NETDATA_INSTALL_PATH}/${dir}"
+ run touch "${NETDATA_INSTALL_PATH}/${dir}/.keep"
+done
+
+# -----------------------------------------------------------------------------
+# create the makeself archive
+
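+# --notemp keeps the extracted tree in place instead of unpacking to a throwaway temporary directory,
+# --needroot makes the resulting installer refuse to run unprivileged, and ./system/post-installer.sh
+# is executed from inside the extracted tree once unpacking finishes.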
+run sed "s|NETDATA_VERSION|${VERSION}|g" < "${NETDATA_MAKESELF_PATH}/makeself.lsm" > "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+run "${NETDATA_MAKESELF_PATH}/makeself.sh" \
+ --gzip \
+ --complevel 9 \
+ --notemp \
+ --needroot \
+ --target "${NETDATA_INSTALL_PATH}" \
+ --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \
+ --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \
+ --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \
+ --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \
+ "${NETDATA_INSTALL_PATH}" \
+ "${NETDATA_INSTALL_PATH}.gz.run" \
+ "netdata, the real-time performance and health monitoring system" \
+ ./system/post-installer.sh
+
+run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+# -----------------------------------------------------------------------------
+# copy it to the netdata build dir
+
+FILE="netdata-${BUILDARCH}-${VERSION}.gz.run"
+
+run mkdir -p artifacts
+run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}"
+
+[ -f "netdata-${BUILDARCH}-latest.gz.run" ] && rm "netdata-${BUILDARCH}-latest.gz.run"
+run ln -s "artifacts/${FILE}" "netdata-${BUILDARCH}-latest.gz.run"
+
+if [ "${BUILDARCH}" = "x86_64" ]; then
+ [ -f "netdata-latest.gz.run" ] && rm "netdata-latest.gz.run"
+ run ln -s "artifacts/${FILE}" "netdata-latest.gz.run"
+ [ -f "artifacts/netdata-${VERSION}.gz.run" ] && rm "netdata-${VERSION}.gz.run"
+ run ln -s "./${FILE}" "artifacts/netdata-${VERSION}.gz.run"
+fi
+
+# shellcheck disable=SC2015
+[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true
+
+echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'"
diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh
new file mode 100755
index 00000000..47992b2c
--- /dev/null
+++ b/packaging/makeself/makeself-header.sh
@@ -0,0 +1,717 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# shellcheck shell=sh
+# shellcheck disable=SC2154,SC2039
+cat << EOF > "$archname"
+#!/bin/sh
+# This script was generated using Makeself $MS_VERSION
+# The license covering this archive and its contents, if any, is wholly independent of the Makeself license (GPL)
+
+ORIG_UMASK=\`umask\`
+if test "$KEEP_UMASK" = n; then
+ umask 077
+fi
+
+CRCsum="$CRCsum"
+MD5="$MD5sum"
+SHA="$SHAsum"
+SIGNATURE="$Signature"
+TMPROOT=\${TMPDIR:=/tmp}
+USER_PWD="\$PWD"
+export USER_PWD
+ARCHIVE_DIR=\`dirname "\$0"\`
+export ARCHIVE_DIR
+
+label="$LABEL"
+script="$SCRIPT"
+scriptargs="$SCRIPTARGS"
+cleanup_script="${CLEANUP_SCRIPT}"
+licensetxt="$LICENSE"
+helpheader="${HELPHEADER}"
+targetdir="$archdirname"
+filesizes="$filesizes"
+totalsize="$totalsize"
+keep="$KEEP"
+nooverwrite="$NOOVERWRITE"
+quiet="n"
+accept="n"
+nodiskspace="n"
+export_conf="$EXPORT_CONF"
+decrypt_cmd="$DECRYPT_CMD"
+skip="$SKIP"
+
+print_cmd_arg=""
+if type printf > /dev/null; then
+ print_cmd="printf"
+elif test -x /usr/ucb/echo; then
+ print_cmd="/usr/ucb/echo"
+else
+ print_cmd="echo"
+fi
+
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:\$PATH
+ export PATH
+fi
+
+if test -d /usr/sfw/bin; then
+ PATH=\$PATH:/usr/sfw/bin
+ export PATH
+fi
+
+unset CDPATH
+
+MS_Printf()
+{
+ \$print_cmd \$print_cmd_arg "\$1"
+}
+
+MS_PrintLicense()
+{
+ PAGER=\${PAGER:=more}
+ if test x"\$licensetxt" != x; then
+ PAGER_PATH=\`exec <&- 2>&-; which \$PAGER || command -v \$PAGER || type \$PAGER\`
+ if test -x "\$PAGER_PATH" && test x"\$accept" != xy; then
+ echo "\$licensetxt" | \$PAGER
+ else
+ echo "\$licensetxt"
+ fi
+ if test x"\$accept" != xy; then
+ while true
+ do
+ MS_Printf "Please type y to accept, n otherwise: "
+ read yn
+ if test x"\$yn" = xn; then
+ keep=n
+ eval \$finish; exit 1
+ break;
+ elif test x"\$yn" = xy; then
+ break;
+ fi
+ done
+ fi
+ fi
+}
+
+MS_diskspace()
+{
+ (
+ df -k "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }'
+ )
+}
+
+MS_dd()
+{
+ blocks=\`expr \$3 / 1024\`
+ bytes=\`expr \$3 % 1024\`
+ # Test for ibs, obs and conv feature
+ if dd if=/dev/zero of=/dev/null count=1 ibs=512 obs=512 conv=sync 2> /dev/null; then
+ dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\
+ { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\
+ test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null
+ else
+ dd if="\$1" bs=\$2 skip=1 2> /dev/null
+ fi
+}
+
+MS_dd_Progress()
+{
+ if test x"\$noprogress" = xy; then
+ MS_dd "\$@"
+ return \$?
+ fi
+ file="\$1"
+ offset=\$2
+ length=\$3
+ pos=0
+ bsize=4194304
+ while test \$bsize -gt \$length; do
+ bsize=\`expr \$bsize / 4\`
+ done
+ blocks=\`expr \$length / \$bsize\`
+ bytes=\`expr \$length % \$bsize\`
+ (
+ dd ibs=\$offset skip=1 count=1 2>/dev/null
+ pos=\`expr \$pos \+ \$bsize\`
+ MS_Printf " 0%% " 1>&2
+ if test \$blocks -gt 0; then
+ while test \$pos -le \$length; do
+ dd bs=\$bsize count=1 2>/dev/null
+ pcent=\`expr \$length / 100\`
+ pcent=\`expr \$pos / \$pcent\`
+ if test \$pcent -lt 100; then
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ if test \$pcent -lt 10; then
+ MS_Printf " \$pcent%% " 1>&2
+ else
+ MS_Printf " \$pcent%% " 1>&2
+ fi
+ fi
+ pos=\`expr \$pos \+ \$bsize\`
+ done
+ fi
+ if test \$bytes -gt 0; then
+ dd bs=\$bytes count=1 2>/dev/null
+ fi
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ MS_Printf " 100%% " 1>&2
+ ) < "\$file"
+}
+
+MS_Help()
+{
+ cat << EOH >&2
+Makeself version $MS_VERSION
+ 1) Getting help or info about \$0 :
+ \$0 --help Print this message
+ \$0 --info Print embedded info : title, default target directory, embedded script ...
+ \$0 --lsm Print embedded lsm entry (or no LSM)
+ \$0 --list Print the list of files in the archive
+ \$0 --check Checks integrity of the archive
+ \$0 --verify-sig key Verify signature against a provided key id
+
+ 2) Running \$0 :
+ \$0 [options] [--] [additional arguments to embedded script]
+ with following options (in that order)
+ --confirm Ask before running embedded script
+ --quiet Do not print anything except error messages
+ --accept Accept the license
+ --noexec Do not run embedded script (implies --noexec-cleanup)
+ --noexec-cleanup Do not run embedded cleanup script
+ --keep Do not erase target directory after running
+ the embedded script
+ --noprogress Do not show the progress during the decompression
+ --nox11 Do not spawn an xterm
+ --nochown Do not give the target folder to the current user
+ --chown Give the target folder to the current user recursively
+ --nodiskspace Do not check for available disk space
+ --target dir Extract directly to a target directory (absolute or relative)
+ This directory may undergo recursive chown (see --nochown).
+ --tar arg1 [arg2 ...] Access the contents of the archive through the tar command
+ --ssl-pass-src src Use the given src as the source of password to decrypt the data
+ using OpenSSL. See "PASS PHRASE ARGUMENTS" in man openssl.
+ Default is to prompt the user to enter decryption password
+ on the current terminal.
+ --cleanup-args args Arguments to the cleanup script. Wrap in quotes to provide
+ multiple arguments.
+ -- Following arguments will be passed to the embedded script\${helpheader}
+EOH
+}
+
+MS_Verify_Sig()
+{
+ GPG_PATH=\`exec <&- 2>&-; which gpg || command -v gpg || type gpg\`
+ MKTEMP_PATH=\`exec <&- 2>&-; which mktemp || command -v mktemp || type mktemp\`
+ test -x "\$GPG_PATH" || GPG_PATH=\`exec <&- 2>&-; which gpg || command -v gpg || type gpg\`
+ test -x "\$MKTEMP_PATH" || MKTEMP_PATH=\`exec <&- 2>&-; which mktemp || command -v mktemp || type mktemp\`
+ offset=\`head -n "\$skip" "\$1" | wc -c | sed "s/ //g"\`
+ temp_sig=\`mktemp -t XXXXX\`
+ echo \$SIGNATURE | base64 --decode > "\$temp_sig"
+ gpg_output=\`MS_dd "\$1" \$offset \$totalsize | LC_ALL=C "\$GPG_PATH" --verify "\$temp_sig" - 2>&1\`
+ gpg_res=\$?
+ rm -f "\$temp_sig"
+ if test \$gpg_res -eq 0 && test \`echo \$gpg_output | grep -c Good\` -eq 1; then
+ if test \`echo \$gpg_output | grep -c \$sig_key\` -eq 1; then
+ test x"\$quiet" = xn && echo "GPG signature is good" >&2
+ else
+ echo "GPG Signature key does not match" >&2
+ exit 2
+ fi
+ else
+ test x"\$quiet" = xn && echo "GPG signature failed to verify" >&2
+ exit 2
+ fi
+}
+
+MS_Check()
+{
+ OLD_PATH="\$PATH"
+ PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\`
+ PATH="\$OLD_PATH"
+
+ SHA_PATH=\`exec <&- 2>&-; which shasum || command -v shasum || type shasum\`
+ test -x "\$SHA_PATH" || SHA_PATH=\`exec <&- 2>&-; which sha256sum || command -v sha256sum || type sha256sum\`
+
+ if test x"\$quiet" = xn; then
+ MS_Printf "Verifying archive integrity..."
+ fi
+ offset=\`head -n "\$skip" "\$1" | wc -c | sed "s/ //g"\`
+ fsize=\`cat "\$1" | wc -c | sed "s/ //g"\`
+ if test \$totalsize -ne \`expr \$fsize - \$offset\`; then
+ echo " Unexpected archive size." >&2
+ exit 2
+ fi
+ verb=\$2
+ i=1
+ for s in \$filesizes
+ do
+ crc=\`echo \$CRCsum | cut -d" " -f\$i\`
+ if test -x "\$SHA_PATH"; then
+ if test x"\`basename \$SHA_PATH\`" = xshasum; then
+ SHA_ARG="-a 256"
+ fi
+ sha=\`echo \$SHA | cut -d" " -f\$i\`
+ if test x"\$sha" = x0000000000000000000000000000000000000000000000000000000000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain an embedded SHA256 checksum." >&2
+ else
+ shasum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$SHA_PATH \$SHA_ARG" | cut -b-64\`;
+ if test x"\$shasum" != x"\$sha"; then
+ echo "Error in SHA256 checksums: \$shasum is different from \$sha" >&2
+ exit 2
+ elif test x"\$quiet" = xn; then
+ MS_Printf " SHA256 checksums are OK." >&2
+ fi
+ crc="0000000000";
+ fi
+ fi
+ if test -x "\$MD5_PATH"; then
+ if test x"\`basename \$MD5_PATH\`" = xdigest; then
+ MD5_ARG="-a md5"
+ fi
+ md5=\`echo \$MD5 | cut -d" " -f\$i\`
+ if test x"\$md5" = x00000000000000000000000000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2
+ else
+ md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`;
+ if test x"\$md5sum" != x"\$md5"; then
+ echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2
+ exit 2
+ elif test x"\$quiet" = xn; then
+ MS_Printf " MD5 checksums are OK." >&2
+ fi
+ crc="0000000000"; verb=n
+ fi
+ fi
+ if test x"\$crc" = x0000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2
+ else
+ sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\`
+ if test x"\$sum1" != x"\$crc"; then
+ echo "Error in checksums: \$sum1 is different from \$crc" >&2
+ exit 2
+ elif test x"\$quiet" = xn; then
+ MS_Printf " CRC checksums are OK." >&2
+ fi
+ fi
+ i=\`expr \$i + 1\`
+ offset=\`expr \$offset + \$s\`
+ done
+ if test x"\$quiet" = xn; then
+ echo " All good."
+ fi
+}
+
+MS_Decompress()
+{
+ if test x"\$decrypt_cmd" != x""; then
+ { eval "\$decrypt_cmd" || echo " ... Decryption failed." >&2; } | eval "$GUNZIP_CMD"
+ else
+ eval "$GUNZIP_CMD"
+ fi
+
+ if test \$? -ne 0; then
+ echo " ... Decompression failed." >&2
+ fi
+}
+
+UnTAR()
+{
+ if test x"\$quiet" = xn; then
+ tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." >&2; kill -15 \$$; }
+ else
+ tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. >&2; kill -15 \$$; }
+ fi
+}
+
+MS_exec_cleanup() {
+ if test x"\$cleanup" = xy && test x"\$cleanup_script" != x""; then
+ cleanup=n
+ cd "\$tmpdir"
+ eval "\"\$cleanup_script\" \$scriptargs \$cleanupargs"
+ fi
+}
+
+MS_cleanup()
+{
+ echo 'Signal caught, cleaning up' >&2
+ MS_exec_cleanup
+ cd "\$TMPROOT"
+ rm -rf "\$tmpdir"
+ eval \$finish; exit 15
+}
+
+finish=true
+xterm_loop=
+noprogress=$NOPROGRESS
+nox11=$NOX11
+copy=$COPY
+ownership=$OWNERSHIP
+verbose=n
+cleanup=y
+cleanupargs=
+sig_key=
+
+initargs="\$@"
+
+while true
+do
+ case "\$1" in
+ -h | --help)
+ MS_Help
+ exit 0
+ ;;
+ -q | --quiet)
+ quiet=y
+ noprogress=y
+ shift
+ ;;
+ --accept)
+ accept=y
+ shift
+ ;;
+ --info)
+ echo Identification: "\$label"
+ echo Target directory: "\$targetdir"
+ echo Uncompressed size: $USIZE KB
+ echo Compression: $COMPRESS
+ if test x"$ENCRYPT" != x""; then
+ echo Encryption: $ENCRYPT
+ fi
+ echo Date of packaging: $DATE
+ echo Built with Makeself version $MS_VERSION
+ echo Build command was: "$MS_COMMAND"
+ if test x"\$script" != x; then
+ echo Script run after extraction:
+ echo " " \$script \$scriptargs
+ fi
+ if test x"$copy" = xcopy; then
+ echo "Archive will copy itself to a temporary location"
+ fi
+ if test x"$NEED_ROOT" = xy; then
+ echo "Root permissions required for extraction"
+ fi
+ if test x"$KEEP" = xy; then
+ echo "directory \$targetdir is permanent"
+ else
+ echo "\$targetdir will be removed after extraction"
+ fi
+ exit 0
+ ;;
+ --dumpconf)
+ echo LABEL=\"\$label\"
+ echo SCRIPT=\"\$script\"
+ echo SCRIPTARGS=\"\$scriptargs\"
+ echo CLEANUPSCRIPT=\"\$cleanup_script\"
+ echo archdirname=\"$archdirname\"
+ echo KEEP=$KEEP
+ echo NOOVERWRITE=$NOOVERWRITE
+ echo COMPRESS=$COMPRESS
+ echo filesizes=\"\$filesizes\"
+ echo totalsize=\"\$totalsize\"
+ echo CRCsum=\"\$CRCsum\"
+ echo MD5sum=\"\$MD5sum\"
+ echo SHAsum=\"\$SHAsum\"
+ echo SKIP=\"\$skip\"
+ exit 0
+ ;;
+ --lsm)
+cat << EOLSM
+EOF
+eval "$LSM_CMD"
+cat << EOF >> "$archname"
+EOLSM
+ exit 0
+ ;;
+ --list)
+ echo Target directory: \$targetdir
+ offset=\`head -n "\$skip" "\$0" | wc -c | sed "s/ //g"\`
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | MS_Decompress | UnTAR t
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --tar)
+ offset=\`head -n "\$skip" "\$0" | wc -c | sed "s/ //g"\`
+ arg1="\$2"
+ shift 2 || { MS_Help; exit 1; }
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | MS_Decompress | tar "\$arg1" - "\$@"
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --check)
+ MS_Check "\$0" y
+ exit 0
+ ;;
+ --verify-sig)
+ sig_key="\$2"
+ shift 2 || { MS_Help; exit 1; }
+ MS_Verify_Sig "\$0"
+ ;;
+ --confirm)
+ verbose=y
+ shift
+ ;;
+ --noexec)
+ script=""
+ cleanup_script=""
+ shift
+ ;;
+ --noexec-cleanup)
+ cleanup_script=""
+ shift
+ ;;
+ --keep)
+ keep=y
+ shift
+ ;;
+ --target)
+ keep=y
+ targetdir="\${2:-.}"
+ shift 2 || { MS_Help; exit 1; }
+ ;;
+ --noprogress)
+ noprogress=y
+ shift
+ ;;
+ --nox11)
+ nox11=y
+ shift
+ ;;
+ --nochown)
+ ownership=n
+ shift
+ ;;
+ --chown)
+ ownership=y
+ shift
+ ;;
+ --nodiskspace)
+ nodiskspace=y
+ shift
+ ;;
+ --xwin)
+ if test "$NOWAIT" = n; then
+ finish="echo Press Return to close this window...; read junk"
+ fi
+ xterm_loop=1
+ shift
+ ;;
+ --phase2)
+ copy=phase2
+ shift
+ ;;
+ --ssl-pass-src)
+ if test x"$ENCRYPT" != x"openssl"; then
+ echo "Invalid option --ssl-pass-src: \$0 was not encrypted with OpenSSL!" >&2
+ exit 1
+ fi
+ decrypt_cmd="\$decrypt_cmd -pass \$2"
+ shift 2 || { MS_Help; exit 1; }
+ ;;
+ --cleanup-args)
+ cleanupargs="\$2"
+ shift 2 || { MS_Help; exit 1; }
+ ;;
+ --)
+ shift
+ break ;;
+ -*)
+ echo Unrecognized flag : "\$1" >&2
+ MS_Help
+ exit 1
+ ;;
+ *)
+ break ;;
+ esac
+done
+
+if test x"\$quiet" = xy -a x"\$verbose" = xy; then
+ echo Cannot be verbose and quiet at the same time. >&2
+ exit 1
+fi
+
+if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then
+ echo "Administrative privileges required for this archive (use su or sudo)" >&2
+ exit 1
+fi
+
+if test x"\$copy" \!= xphase2; then
+ MS_PrintLicense
+fi
+
+case "\$copy" in
+copy)
+ tmpdir="\$TMPROOT"/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$
+ mkdir "\$tmpdir" || {
+ echo "Could not create temporary directory \$tmpdir" >&2
+ exit 1
+ }
+ SCRIPT_COPY="\$tmpdir/makeself"
+ echo "Copying to a temporary location..." >&2
+ cp "\$0" "\$SCRIPT_COPY"
+ chmod +x "\$SCRIPT_COPY"
+ cd "\$TMPROOT"
+ export USER_PWD="\$tmpdir"
+ exec "\$SCRIPT_COPY" --phase2 -- \$initargs
+ ;;
+phase2)
+ finish="\$finish ; rm -rf \`dirname \$0\`"
+ ;;
+esac
+
+if test x"\$nox11" = xn; then
+ if test -t 1; then # Do we have a terminal on stdout?
+ :
+ else
+ if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X?
+ if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable
+ GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology"
+ for a in \$GUESS_XTERMS; do
+ if type \$a >/dev/null 2>&1; then
+ XTERM=\$a
+ break
+ fi
+ done
+ chmod a+x \$0 || echo Please add execution rights on \$0 >&2
+ if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal!
+ exec \$XTERM -e "\$0 --xwin \$initargs"
+ else
+ exec \$XTERM -e "./\$0 --xwin \$initargs"
+ fi
+ fi
+ fi
+ fi
+fi
+
+if test x"\$targetdir" = x.; then
+ tmpdir="."
+else
+ if test x"\$keep" = xy; then
+ if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then
+ echo "Target directory \$targetdir already exists, aborting." >&2
+ exit 1
+ fi
+ if test x"\$quiet" = xn; then
+ echo "Creating directory \$targetdir" >&2
+ fi
+ tmpdir="\$targetdir"
+ dashp="-p"
+ else
+ tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM"
+ dashp=""
+ fi
+ mkdir \$dashp "\$tmpdir" || {
+ echo 'Cannot create target directory' \$tmpdir >&2
+ echo 'You should try option --target dir' >&2
+ eval \$finish
+ exit 1
+ }
+fi
+
+location="\`pwd\`"
+if test x"\$SETUP_NOCHECK" != x1; then
+ MS_Check "\$0"
+fi
+offset=\`head -n "\$skip" "\$0" | wc -c | sed "s/ //g"\`
+
+if test x"\$verbose" = xy; then
+ MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] "
+ read yn
+ if test x"\$yn" = xn; then
+ eval \$finish; exit 1
+ fi
+fi
+
+if test x"\$quiet" = xn; then
+ # Decrypting with openssl will ask for password,
+ # the prompt needs to start on new line
+ if test x"$ENCRYPT" = x"openssl"; then
+ echo "Decrypting and uncompressing \$label..."
+ else
+ MS_Printf "Uncompressing \$label"
+ fi
+fi
+res=3
+if test x"\$keep" = xn; then
+ trap MS_cleanup 1 2 3 15
+fi
+
+if test x"\$nodiskspace" = xn; then
+ leftspace=\`MS_diskspace "\$tmpdir"\`
+ if test -n "\$leftspace"; then
+ if test "\$leftspace" -lt $USIZE; then
+ echo
+ echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2
+ echo "Use --nodiskspace option to skip this check and proceed anyway" >&2
+ if test x"\$keep" = xn; then
+ echo "Consider setting TMPDIR to a directory with more free space."
+ fi
+ eval \$finish; exit 1
+ fi
+ fi
+fi
+
+for s in \$filesizes
+do
+ if MS_dd_Progress "\$0" \$offset \$s | MS_Decompress | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then
+ if test x"\$ownership" = xy; then
+ (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .)
+ fi
+ else
+ echo >&2
+ echo "Unable to decompress \$0" >&2
+ eval \$finish; exit 1
+ fi
+ offset=\`expr \$offset + \$s\`
+done
+if test x"\$quiet" = xn; then
+ echo
+fi
+
+cd "\$tmpdir"
+res=0
+if test x"\$script" != x; then
+ if test x"\$export_conf" = x"y"; then
+ MS_BUNDLE="\$0"
+ MS_LABEL="\$label"
+ MS_SCRIPT="\$script"
+ MS_SCRIPTARGS="\$scriptargs"
+ MS_ARCHDIRNAME="\$archdirname"
+ MS_KEEP="\$KEEP"
+ MS_NOOVERWRITE="\$NOOVERWRITE"
+ MS_COMPRESS="\$COMPRESS"
+ MS_CLEANUP="\$cleanup"
+ export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS
+ export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS
+ fi
+
+ if test x"\$verbose" = x"y"; then
+ MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] "
+ read yn
+ if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?;
+ fi
+ else
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?
+ fi
+ if test "\$res" -ne 0; then
+ test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2
+ fi
+fi
+
+MS_exec_cleanup
+
+if test x"\$keep" = xn; then
+ cd "\$TMPROOT"
+ rm -rf "\$tmpdir"
+fi
+eval \$finish; exit \$res
+EOF
diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt
new file mode 100644
index 00000000..8ed15e2e
--- /dev/null
+++ b/packaging/makeself/makeself-help-header.txt
@@ -0,0 +1,49 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system runs with systemd:
+
+ - /lib/systemd/system/netdata.service
+
+ or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
+
+ Anonymous stat collection and reporting to Netdata is enabled
+ by default. To disable it, pass the --disable-telemetry option to the installer
+ or set the environment variable DISABLE_TELEMETRY to a non-zero or non-empty
+ value (e.g. export DISABLE_TELEMETRY=1).
diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt
new file mode 100644
index 00000000..684f16ae
--- /dev/null
+++ b/packaging/makeself/makeself-license.txt
@@ -0,0 +1,41 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017-2023, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ If a supported init system is detected, appropriate configuration will be
+ installed to allow Netdata to run as a system service. We currently support
+ systemd, OpenRC, LSB init scripts, and traditional init.d setups, and have
+ experimental support for runit.
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE
diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm
new file mode 100644
index 00000000..7d635646
--- /dev/null
+++ b/packaging/makeself/makeself.lsm
@@ -0,0 +1,16 @@
+Begin3
+Title: netdata
+Version: NETDATA_VERSION
+Description: netdata is a system for distributed real-time performance and health monitoring.
+ It provides unparalleled insights, in real time, into everything happening on the
+ system it runs on (including applications such as web and database servers), using
+ modern interactive web dashboards. netdata is fast and efficient, designed to
+ permanently run on all systems (physical & virtual servers, containers, IoT
+ devices), without disrupting their core function.
+Keywords: real-time performance and health monitoring
+Author: Costa Tsaousis (costa@tsaousis.gr)
+Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
+Original-site: https://netdata.cloud/
+Platform: Unix
+Copying-policy: GPL
+End
diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh
new file mode 100755
index 00000000..3a975068
--- /dev/null
+++ b/packaging/makeself/makeself.sh
@@ -0,0 +1,780 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# shellcheck disable=SC2209,SC2006,SC2016,SC2034,SC2086,SC2003,SC2268,SC1090,SC2002,SC2046
+#
+# Makeself version 2.5.x
+# by Stephane Peter <megastep@megastep.org>
+#
+# Utility to create self-extracting tar.gz archives.
+# The resulting archive is a file holding the tar.gz archive with
+# a small Shell script stub that uncompresses the archive to a temporary
+# directory and then executes a given script from within that directory.
+#
+# Makeself home page: https://makeself.io/ - Version history available on GitHub
+#
+# (C) 1998-2023 by Stephane Peter <megastep@megastep.org>
+#
+# This software is released under the terms of the GNU GPL version 2 and above
+# Please read the license at http://www.gnu.org/copyleft/gpl.html
+# Self-extracting archives created with this script are explicitly NOT released under the terms of the GPL
+#
+
+MS_VERSION=2.5.0
+MS_COMMAND="$0"
+unset CDPATH
+
+for f in ${1+"$@"}; do
+ MS_COMMAND="$MS_COMMAND \\\\
+ \\\"$f\\\""
+done
+
+# For Solaris systems
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:$PATH
+ export PATH
+fi
+
+# Procedures
+
+MS_Usage()
+{
+ echo "Usage: $0 [args] archive_dir file_name label startup_script [script_args]"
+ echo "args can be one or more of the following :"
+ echo " --version | -v : Print out Makeself version number and exit"
+ echo " --help | -h : Print out this help message"
+ echo " --tar-quietly : Suppress verbose output from the tar command"
+ echo " --quiet | -q : Do not print any messages other than errors."
+ echo " --gzip : Compress using gzip (default if detected)"
+ echo " --pigz : Compress with pigz"
+ echo " --zstd : Compress with zstd"
+ echo " --bzip2 : Compress using bzip2 instead of gzip"
+ echo " --pbzip2 : Compress using pbzip2 instead of gzip"
+ echo " --bzip3 : Compress using bzip3 instead of gzip"
+ echo " --xz : Compress using xz instead of gzip"
+ echo " --lzo : Compress using lzop instead of gzip"
+ echo " --lz4 : Compress using lz4 instead of gzip"
+ echo " --compress : Compress using the UNIX 'compress' command"
+ echo " --complevel lvl : Compression level for gzip pigz zstd xz lzo lz4 bzip2 pbzip2 and bzip3 (default 9)"
+ echo " --threads thds : Number of threads to be used by compressors that support parallelization."
+ echo " Omit to use compressor's default. Most useful (and required) for opting"
+ echo " into xz's threading, usually with '--threads=0' for all available cores."
+ echo " pbzip2 and pigz are parallel by default, and setting this value allows"
+ echo " limiting the number of threads they use."
+ echo " --base64 : Instead of compressing, encode the data using base64"
+ echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG"
+ echo " --gpg-asymmetric-encrypt-sign"
+ echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG"
+ echo " --gpg-extra opt : Append more options to the gpg command line"
+ echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL"
+ echo " --ssl-passwd pass : Use the given password to encrypt the data using OpenSSL"
+ echo " --ssl-pass-src src : Use the given src as the source of password to encrypt the data"
+ echo " using OpenSSL. See \"PASS PHRASE ARGUMENTS\" in man openssl."
+ echo " If this option is not supplied, the user will be asked to enter"
+ echo " encryption password on the current terminal."
+ echo " --ssl-no-md : Do not use \"-md\" option not supported by older OpenSSL."
+ echo " --nochown : Do not give the target folder to the current user (default)"
+ echo " --chown : Give the target folder to the current user recursively"
+ echo " --nocomp : Do not compress the data"
+ echo " --notemp : The archive will create archive_dir in the current directory"
+ echo " and uncompress in ./archive_dir"
+ echo " Note: persistent archives do not strictly require a startup_script"
+ echo " --needroot : Check that the root user is extracting the archive before proceeding"
+ echo " --copy : Upon extraction, the archive will first copy itself to"
+ echo " a temporary directory"
+ echo " --append : Append more files to an existing Makeself archive"
+ echo " The label and startup scripts will then be ignored"
+ echo " --target dir : Extract directly to a target directory"
+ echo " directory path can be either absolute or relative"
+ echo " --current : Files will be extracted to the current directory"
+ echo " Both --current and --target imply --notemp, and do not require a startup_script"
+ echo " --nooverwrite : Do not extract the archive if the specified target directory exists"
+ echo " --tar-format opt : Specify a tar archive format (default is ustar)"
+ echo " --tar-extra opt : Append more options to the tar command line"
+ echo " --untar-extra opt : Append more options to the during the extraction of the tar archive"
+ echo " --nomd5 : Don't calculate an MD5 for archive"
+ echo " --nocrc : Don't calculate a CRC for archive"
+ echo " --sha256 : Compute a SHA256 checksum for the archive"
+ echo " --header file : Specify location of the header script"
+ echo " --cleanup file : Specify a cleanup script that executes on interrupt and when finished successfully."
+ echo " --follow : Follow the symlinks in the archive"
+ echo " --noprogress : Do not show the progress during the decompression"
+ echo " --nox11 : Disable automatic spawn of a xterm"
+ echo " --nowait : Do not wait for user input after executing embedded"
+ echo " program from an xterm"
+ echo " --sign passphrase : Signature private key to sign the package with"
+ echo " --lsm file : LSM file describing the package"
+ echo " --license file : Append a license file"
+ echo " --help-header file : Add a header to the archive's --help output"
+ echo " --packaging-date date"
+ echo " : Use provided string as the packaging date"
+ echo " instead of the current date."
+ echo
+ echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive."
+ echo " --export-conf : Export configuration variables to startup_script"
+ echo
+ echo "Do not forget to give a fully qualified startup script name"
+ echo "(i.e. with a ./ prefix if inside the archive)."
+ exit 1
+}
+
+# Default settings
+if type gzip >/dev/null 2>&1; then
+ COMPRESS=gzip
+elif type compress >/dev/null 2>&1; then
+ COMPRESS=compress
+else
+ echo "ERROR: missing commands: gzip, compress" >&2
+ MS_Usage
+fi
+ENCRYPT=n
+PASSWD=""
+PASSWD_SRC=""
+OPENSSL_NO_MD=n
+COMPRESS_LEVEL=9
+DEFAULT_THREADS=123456 # Sentinel value
+THREADS=$DEFAULT_THREADS
+KEEP=n
+CURRENT=n
+NOX11=n
+NOWAIT=n
+APPEND=n
+TAR_QUIETLY=n
+KEEP_UMASK=n
+QUIET=n
+NOPROGRESS=n
+COPY=none
+NEED_ROOT=n
+TAR_ARGS=rvf
+TAR_FORMAT=ustar
+TAR_EXTRA=""
+GPG_EXTRA=""
+DU_ARGS=-ks
+HEADER=`dirname "$0"`/makeself-header.sh
+SIGNATURE=""
+TARGETDIR=""
+NOOVERWRITE=n
+DATE=`LC_ALL=C date`
+EXPORT_CONF=n
+SHA256=n
+OWNERSHIP=n
+SIGN=n
+GPG_PASSPHRASE=""
+
+# LSM file stuff
+LSM_CMD="echo No LSM. >> \"\$archname\""
+
+while true
+do
+ case "$1" in
+ --version | -v)
+ echo Makeself version $MS_VERSION
+ exit 0
+ ;;
+ --pbzip2)
+ COMPRESS=pbzip2
+ shift
+ ;;
+ --bzip3)
+ COMPRESS=bzip3
+ shift
+ ;;
+ --bzip2)
+ COMPRESS=bzip2
+ shift
+ ;;
+ --gzip)
+ COMPRESS=gzip
+ shift
+ ;;
+ --pigz)
+ COMPRESS=pigz
+ shift
+ ;;
+ --zstd)
+ COMPRESS=zstd
+ shift
+ ;;
+ --xz)
+ COMPRESS=xz
+ shift
+ ;;
+ --lzo)
+ COMPRESS=lzo
+ shift
+ ;;
+ --lz4)
+ COMPRESS=lz4
+ shift
+ ;;
+ --compress)
+ COMPRESS=compress
+ shift
+ ;;
+ --base64)
+ COMPRESS=base64
+ shift
+ ;;
+ --gpg-encrypt)
+ COMPRESS=gpg
+ shift
+ ;;
+ --gpg-asymmetric-encrypt-sign)
+ COMPRESS=gpg-asymmetric
+ shift
+ ;;
+ --gpg-extra)
+ GPG_EXTRA="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --ssl-encrypt)
+ ENCRYPT=openssl
+ shift
+ ;;
+ --ssl-passwd)
+ PASSWD=$2
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --ssl-pass-src)
+ PASSWD_SRC=$2
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --ssl-no-md)
+ OPENSSL_NO_MD=y
+ shift
+ ;;
+ --nocomp)
+ COMPRESS=none
+ shift
+ ;;
+ --complevel)
+ COMPRESS_LEVEL="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --threads)
+ THREADS="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --nochown)
+ OWNERSHIP=n
+ shift
+ ;;
+ --chown)
+ OWNERSHIP=y
+ shift
+ ;;
+ --notemp)
+ KEEP=y
+ shift
+ ;;
+ --copy)
+ COPY=copy
+ shift
+ ;;
+ --current)
+ CURRENT=y
+ KEEP=y
+ shift
+ ;;
+ --tar-format)
+ TAR_FORMAT="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --tar-extra)
+ TAR_EXTRA="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --untar-extra)
+ UNTAR_EXTRA="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --target)
+ TARGETDIR="$2"
+ KEEP=y
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --sign)
+ SIGN=y
+ GPG_PASSPHRASE="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --nooverwrite)
+ NOOVERWRITE=y
+ shift
+ ;;
+ --needroot)
+ NEED_ROOT=y
+ shift
+ ;;
+ --header)
+ HEADER="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --cleanup)
+ CLEANUP_SCRIPT="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --license)
+ # We need to escape all characters having a special meaning in double quotes
+ LICENSE=$(sed 's/\\/\\\\/g; s/"/\\\"/g; s/`/\\\`/g; s/\$/\\\$/g' "$2")
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --follow)
+ TAR_ARGS=rvhf
+ DU_ARGS=-ksL
+ shift
+ ;;
+ --noprogress)
+ NOPROGRESS=y
+ shift
+ ;;
+ --nox11)
+ NOX11=y
+ shift
+ ;;
+ --nowait)
+ NOWAIT=y
+ shift
+ ;;
+ --nomd5)
+ NOMD5=y
+ shift
+ ;;
+ --sha256)
+ SHA256=y
+ shift
+ ;;
+ --nocrc)
+ NOCRC=y
+ shift
+ ;;
+ --append)
+ APPEND=y
+ shift
+ ;;
+ --lsm)
+ LSM_CMD="awk 1 \"$2\" >> \"\$archname\""
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --packaging-date)
+ DATE="$2"
+ shift 2 || { MS_Usage; exit 1; }
+ ;;
+ --help-header)
+ HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2`
+ shift 2 || { MS_Usage; exit 1; }
+ [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER
+"
+ ;;
+ --tar-quietly)
+ TAR_QUIETLY=y
+ shift
+ ;;
+ --keep-umask)
+ KEEP_UMASK=y
+ shift
+ ;;
+ --export-conf)
+ EXPORT_CONF=y
+ shift
+ ;;
+ -q | --quiet)
+ QUIET=y
+ shift
+ ;;
+ -h | --help)
+ MS_Usage
+ ;;
+ -*)
+ echo Unrecognized flag : "$1"
+ MS_Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+if test $# -lt 1; then
+ MS_Usage
+else
+ if test -d "$1"; then
+ archdir="$1"
+ else
+ echo "Directory $1 does not exist." >&2
+ exit 1
+ fi
+fi
+archname="$2"
+
+if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then
+ if test "$TAR_ARGS" = "rvf"; then
+ TAR_ARGS="rf"
+ elif test "$TAR_ARGS" = "rvhf"; then
+ TAR_ARGS="rhf"
+ fi
+fi
+
+if test "$APPEND" = y; then
+ if test $# -lt 2; then
+ MS_Usage
+ fi
+
+ # Gather the info from the original archive
+ OLDENV=`sh "$archname" --dumpconf`
+ if test $? -ne 0; then
+ echo "Unable to update archive: $archname" >&2
+ exit 1
+ else
+ eval "$OLDENV"
+ OLDSKIP=`expr $SKIP + 1`
+ fi
+else
+ if test "$KEEP" = n -a $# = 3; then
+ echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2
+ echo >&2
+ MS_Usage
+ fi
+ # We don't want to create an absolute directory unless a target directory is defined
+ if test "$CURRENT" = y; then
+ archdirname="."
+ elif test x"$TARGETDIR" != x; then
+ archdirname="$TARGETDIR"
+ else
+ archdirname=`basename "$1"`
+ fi
+
+ if test $# -lt 3; then
+ MS_Usage
+ fi
+
+ LABEL="$3"
+ SCRIPT="$4"
+ test "x$SCRIPT" = x || shift 1
+ shift 3
+ SCRIPTARGS="$*"
+fi
+
+if test "$KEEP" = n -a "$CURRENT" = y; then
+ echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2
+ exit 1
+fi
+
+case $COMPRESS in
+gzip)
+ GZIP_CMD="gzip -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pigz)
+ GZIP_CMD="pigz -$COMPRESS_LEVEL"
+ if test $THREADS -ne $DEFAULT_THREADS; then # Leave as the default if threads not indicated
+ GZIP_CMD="$GZIP_CMD --processes $THREADS"
+ fi
+ GUNZIP_CMD="gzip -cd"
+ ;;
+zstd)
+ GZIP_CMD="zstd -$COMPRESS_LEVEL"
+ if test $THREADS -ne $DEFAULT_THREADS; then # Leave as the default if threads not indicated
+ GZIP_CMD="$GZIP_CMD --threads=$THREADS"
+ fi
+ GUNZIP_CMD="zstd -cd"
+ ;;
+pbzip2)
+ GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL"
+ if test $THREADS -ne $DEFAULT_THREADS; then # Leave as the default if threads not indicated
+ GZIP_CMD="$GZIP_CMD -p$THREADS"
+ fi
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+bzip3)
+ # Map the compression level to a block size in MiB as 2^(level-1).
+ BZ3_COMPRESS_LEVEL=`echo "2^($COMPRESS_LEVEL-1)" | bc`
+ GZIP_CMD="bzip3 -b$BZ3_COMPRESS_LEVEL"
+ if test $THREADS -ne $DEFAULT_THREADS; then # Leave as the default if threads not indicated
+ GZIP_CMD="$GZIP_CMD -j$THREADS"
+ fi
+ JOBS=`echo "10-$COMPRESS_LEVEL" | bc`
+ GUNZIP_CMD="bzip3 -dj$JOBS"
+ ;;
+bzip2)
+ GZIP_CMD="bzip2 -$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+xz)
+ GZIP_CMD="xz -c$COMPRESS_LEVEL"
+ # Must opt-in by specifying a value since not all versions of xz support threads
+ if test $THREADS -ne $DEFAULT_THREADS; then
+ GZIP_CMD="$GZIP_CMD --threads=$THREADS"
+ fi
+ GUNZIP_CMD="xz -d"
+ ;;
+lzo)
+ GZIP_CMD="lzop -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lzop -d"
+ ;;
+lz4)
+ GZIP_CMD="lz4 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lz4 -d"
+ ;;
+base64)
+ GZIP_CMD="base64"
+ GUNZIP_CMD="base64 --decode -i -"
+ ;;
+gpg)
+ GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL"
+ GUNZIP_CMD="gpg -d"
+ ENCRYPT="gpg"
+ ;;
+gpg-asymmetric)
+ GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es"
+ GUNZIP_CMD="gpg --yes -d"
+ ENCRYPT="gpg"
+ ;;
+compress)
+ GZIP_CMD="compress -fc"
+ GUNZIP_CMD="(type compress >/dev/null 2>&1 && compress -fcd || gzip -cd)"
+ ;;
+none)
+ GZIP_CMD="cat"
+ GUNZIP_CMD="cat"
+ ;;
+esac
+
+if test x"$ENCRYPT" = x"openssl"; then
+ if test x"$APPEND" = x"y"; then
+ echo "Appending to existing archive is not compatible with OpenSSL encryption." >&2
+ fi
+
+ ENCRYPT_CMD="openssl enc -aes-256-cbc -salt"
+ DECRYPT_CMD="openssl enc -aes-256-cbc -d"
+
+ if test x"$OPENSSL_NO_MD" != x"y"; then
+ ENCRYPT_CMD="$ENCRYPT_CMD -md sha256"
+ DECRYPT_CMD="$DECRYPT_CMD -md sha256"
+ fi
+
+ if test -n "$PASSWD_SRC"; then
+ ENCRYPT_CMD="$ENCRYPT_CMD -pass $PASSWD_SRC"
+ elif test -n "$PASSWD"; then
+ ENCRYPT_CMD="$ENCRYPT_CMD -pass pass:$PASSWD"
+ fi
+fi
+
+tmpfile="${TMPDIR:-/tmp}/mkself$$"
+
+if test -f "$HEADER"; then
+ oldarchname="$archname"
+ archname="$tmpfile"
+ # Generate a fake header to count its lines
+ SKIP=0
+ . "$HEADER"
+ SKIP=`cat "$tmpfile" |wc -l`
+ # Get rid of any spaces
+ SKIP=`expr $SKIP`
+ rm -f "$tmpfile"
+ if test "$QUIET" = "n"; then
+ echo "Header is $SKIP lines long" >&2
+ fi
+ archname="$oldarchname"
+else
+ echo "Unable to open header file: $HEADER" >&2
+ exit 1
+fi
+
+if test "$QUIET" = "n"; then
+ echo
+fi
+
+if test "$APPEND" = n; then
+ if test -f "$archname"; then
+ echo "WARNING: Overwriting existing file: $archname" >&2
+ fi
+fi
+
+USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'`
+
+if test "." = "$archdirname"; then
+ if test "$KEEP" = n; then
+ archdirname="makeself-$$-`date +%Y%m%d%H%M%S`"
+ fi
+fi
+
+test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; }
+if test "$QUIET" = "n"; then
+ echo "About to compress $USIZE KB of data..."
+ echo "Adding files to archive named \"$archname\"..."
+fi
+
+# See if we have GNU tar
+TAR=`exec <&- 2>&-; which gtar || command -v gtar || type gtar`
+test -x "$TAR" || TAR=`exec <&- 2>&-; which bsdtar || command -v bsdtar || type bsdtar`
+test -x "$TAR" || TAR=tar
+
+tmparch="${TMPDIR:-/tmp}/mkself$$.tar"
+(
+ if test "$APPEND" = "y"; then
+ tail -n "+$OLDSKIP" "$archname" | eval "$GUNZIP_CMD" > "$tmparch"
+ fi
+ cd "$archdir"
+ # "Determining if a directory is empty"
+ # https://www.etalabs.net/sh_tricks.html
+ find . \
+ \( \
+ ! -type d \
+ -o \
+ \( -links 2 -exec sh -c '
+ is_empty () (
+ cd "$1"
+ set -- .[!.]* ; test -f "$1" && return 1
+ set -- ..?* ; test -f "$1" && return 1
+ set -- * ; test -f "$1" && return 1
+ return 0
+ )
+ is_empty "$0"' {} \; \
+ \) \
+ \) -print \
+ | LC_ALL=C sort \
+ | sed 's/./\\&/g' \
+ | xargs $TAR $TAR_EXTRA --format $TAR_FORMAT -$TAR_ARGS "$tmparch"
+) || {
+ echo "ERROR: failed to create temporary archive: $tmparch"
+ rm -f "$tmparch" "$tmpfile"
+ exit 1
+}
+
+USIZE=`du $DU_ARGS "$tmparch" | awk '{print $1}'`
+
+eval "$GZIP_CMD" <"$tmparch" >"$tmpfile" || {
+ echo "ERROR: failed to create temporary file: $tmpfile"
+ rm -f "$tmparch" "$tmpfile"
+ exit 1
+}
+rm -f "$tmparch"
+
+if test x"$ENCRYPT" = x"openssl"; then
+ echo "About to encrypt archive \"$archname\"..."
+ { eval "$ENCRYPT_CMD -in $tmpfile -out ${tmpfile}.enc" && mv -f ${tmpfile}.enc $tmpfile; } || \
+ { echo Aborting: could not encrypt temporary file: "$tmpfile".; rm -f "$tmpfile"; exit 1; }
+fi
+
+fsize=`cat "$tmpfile" | wc -c | tr -d " "`
+
+# Compute the checksums
+
+shasum=0000000000000000000000000000000000000000000000000000000000000000
+md5sum=00000000000000000000000000000000
+crcsum=0000000000
+
+if test "$NOCRC" = y; then
+ if test "$QUIET" = "n"; then
+ echo "skipping crc at user request"
+ fi
+else
+ crcsum=`CMD_ENV=xpg4 cksum < "$tmpfile" | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1`
+ if test "$QUIET" = "n"; then
+ echo "CRC: $crcsum"
+ fi
+fi
+
+if test "$SHA256" = y; then
+ SHA_PATH=`exec <&- 2>&-; which shasum || command -v shasum || type shasum`
+ if test -x "$SHA_PATH"; then
+ shasum=`eval "$SHA_PATH -a 256" < "$tmpfile" | cut -b-64`
+ else
+ SHA_PATH=`exec <&- 2>&-; which sha256sum || command -v sha256sum || type sha256sum`
+ shasum=`eval "$SHA_PATH" < "$tmpfile" | cut -b-64`
+ fi
+ if test "$QUIET" = "n"; then
+ if test -x "$SHA_PATH"; then
+ echo "SHA256: $shasum"
+ else
+ echo "SHA256: none, SHA command not found"
+ fi
+ fi
+fi
+if test "$NOMD5" = y; then
+ if test "$QUIET" = "n"; then
+ echo "Skipping md5sum at user request"
+ fi
+else
+ # Try to locate a MD5 binary
+ OLD_PATH=$PATH
+ PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest`
+ PATH=$OLD_PATH
+ if test -x "$MD5_PATH"; then
+ if test `basename ${MD5_PATH}`x = digestx; then
+ MD5_ARG="-a md5"
+ fi
+ md5sum=`eval "$MD5_PATH $MD5_ARG" < "$tmpfile" | cut -b-32`
+ if test "$QUIET" = "n"; then
+ echo "MD5: $md5sum"
+ fi
+ else
+ if test "$QUIET" = "n"; then
+ echo "MD5: none, MD5 command not found"
+ fi
+ fi
+fi
+if test "$SIGN" = y; then
+ GPG_PATH=`exec <&- 2>&-; which gpg || command -v gpg || type gpg`
+ if test -x "$GPG_PATH"; then
+ SIGNATURE=`$GPG_PATH --pinentry-mode=loopback --batch --yes $GPG_EXTRA --passphrase "$GPG_PASSPHRASE" --output - --detach-sig $tmpfile | base64 | tr -d \\\\n`
+ if test "$QUIET" = "n"; then
+ echo "Signature: $SIGNATURE"
+ fi
+ else
+ echo "Missing gpg command" >&2
+ fi
+fi
+
+totalsize=0
+for size in $fsize;
+do
+ totalsize=`expr $totalsize + $size`
+done
+
+if test "$APPEND" = y; then
+ mv "$archname" "$archname".bak || exit
+
+ # Prepare entry for new archive
+ filesizes="$fsize"
+ CRCsum="$crcsum"
+ MD5sum="$md5sum"
+ SHAsum="$shasum"
+ Signature="$SIGNATURE"
+ # Generate the header
+ . "$HEADER"
+ # Append the new data
+ cat "$tmpfile" >> "$archname"
+
+ chmod +x "$archname"
+ rm -f "$archname".bak
+ if test "$QUIET" = "n"; then
+ echo "Self-extractable archive \"$archname\" successfully updated."
+ fi
+else
+ filesizes="$fsize"
+ CRCsum="$crcsum"
+ MD5sum="$md5sum"
+ SHAsum="$shasum"
+ Signature="$SIGNATURE"
+
+ # Generate the header
+ . "$HEADER"
+
+ # Append the compressed tar data after the stub
+ if test "$QUIET" = "n"; then
+ echo
+ fi
+ cat "$tmpfile" >> "$archname"
+ chmod +x "$archname"
+ if test "$QUIET" = "n"; then
+ echo Self-extractable archive \"$archname\" successfully created.
+ fi
+fi
+rm -f "$tmpfile"
diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh
new file mode 100755
index 00000000..38cc41ef
--- /dev/null
+++ b/packaging/makeself/post-installer.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This script is started with the system's default shell
+# and executes our 'install-or-update.sh' script
+# using the statically linked bash that netdata ships,
+#
+# so 'install-or-update.sh' is always guaranteed
+# to run under bash v4.
+
+./bin/bash system/install-or-update.sh "${@}"
diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh
new file mode 100755
index 00000000..dd123c21
--- /dev/null
+++ b/packaging/makeself/run-all-jobs.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+LC_ALL=C
+umask 002
+
+# -----------------------------------------------------------------------------
+# prepare the environment for the jobs
+
+# installation directory
+export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+
+# our source directory
+NETDATA_MAKESELF_PATH="$(dirname "${0}")"
+export NETDATA_MAKESELF_PATH
+if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ]; then
+ NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+ export NETDATA_MAKESELF_PATH
+fi
+
+# netdata source directory
+export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+
+# make sure ${NULL} is empty
+export NULL=
+
+# -----------------------------------------------------------------------------
+
+cd "${NETDATA_MAKESELF_PATH}" || exit 1
+
+# shellcheck source=packaging/makeself/functions.sh
+. ./functions.sh "${@}" || exit 1
+
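+# The glob expands in lexical order, so the numeric prefixes of the job scripts
+# determine the order in which the packaging jobs run.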
+for x in jobs/*.install.sh; do
+ progress "running ${x}"
+ "${x}" "${NETDATA_INSTALL_PATH}"
+done
+
+echo >&2 "All jobs for static packaging done successfully."
+exit 0
diff --git a/packaging/makeself/uname2platform.sh b/packaging/makeself/uname2platform.sh
new file mode 100755
index 00000000..7eab706e
--- /dev/null
+++ b/packaging/makeself/uname2platform.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+BUILDARCH="${1}"
+
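+# Map a 'uname -m' machine name to the corresponding Docker/OCI platform string.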
+case "${BUILDARCH}" in
+ x86_64) echo "linux/amd64" ;;
+ armv7l) echo "linux/arm/v7" ;;
+ aarch64) echo "linux/arm64/v8" ;;
+ ppc64le) echo "linux/ppc64le" ;;
+ *)
+ echo "Unknown target architecture '${BUILDARCH}'." >&2
+ exit 1
+ ;;
+esac
diff --git a/packaging/protobuf.checksums b/packaging/protobuf.checksums
new file mode 100644
index 00000000..4a025c5f
--- /dev/null
+++ b/packaging/protobuf.checksums
@@ -0,0 +1 @@
+89ac31a93832e204db6d73b1e80f39f142d5747b290f17340adce5be5b122f94 protobuf-cpp-3.19.4.tar.gz
diff --git a/packaging/protobuf.version b/packaging/protobuf.version
new file mode 100644
index 00000000..de24deec
--- /dev/null
+++ b/packaging/protobuf.version
@@ -0,0 +1 @@
+3.19.4
diff --git a/packaging/repoconfig/Makefile b/packaging/repoconfig/Makefile
new file mode 100644
index 00000000..18b9887f
--- /dev/null
+++ b/packaging/repoconfig/Makefile
@@ -0,0 +1,35 @@
+FILES = netdata.list netdata-edge.list netdata-archive-keyring.gpg netdata-edge-archive-keyring.gpg netdata-repoconfig-archive-keyring.gpg
+
+all: $(FILES)
+
+netdata.list: netdata.list.in
+ cp netdata.list.in netdata.list
+ set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__/stable/" netdata.list
+
+netdata-edge.list: netdata.list.in
+ cp netdata.list.in netdata-edge.list
+ set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__/edge/" netdata-edge.list
+
+netdata.gpg.key:
+ curl -L https://repo.netdata.cloud/netdatabot.gpg.key > $@
+
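+# The downloaded key is ASCII-armored; 'gpg --dearmor' converts it into the binary
+# keyring format expected under /etc/apt/trusted.gpg.d.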
+netdata-archive-keyring.gpg: netdata.gpg.key
+ gpg --dearmor > $@ < $<
+
+netdata-edge-archive-keyring.gpg: netdata.gpg.key
+ gpg --dearmor > $@ < $<
+
+netdata-repoconfig-archive-keyring.gpg: netdata.gpg.key
+ gpg --dearmor > $@ < $<
+
+debian/tmp:
+ mkdir -p $@
+
+install: $(FILES) debian/tmp
+ cp $(FILES) debian/tmp/
+
+clean:
+ rm -f $(FILES)
+
+.PHONY: clean
+.INTERMEDIATE: netdata.gpg.key
diff --git a/packaging/repoconfig/build-deb.sh b/packaging/repoconfig/build-deb.sh
new file mode 100755
index 00000000..97f929a6
--- /dev/null
+++ b/packaging/repoconfig/build-deb.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+# Extract distro info from /etc/os-release
+DISTVERS="$(awk -F'"' '/VERSION_ID=/ {print $2}' /etc/os-release)"
+DISTNAME="$(awk -F'=' '/^ID=/ {print $2}' /etc/os-release)"
+
+# Keep dpkg from trying to configure packages interactively when it detects a terminal.
+export DEBIAN_FRONTEND=noninteractive
+
+# Pull in our dependencies
+apt update || exit 1
+apt upgrade -y || exit 1
+apt install -y build-essential debhelper curl gnupg || exit 1
+
+# Run the builds in an isolated source directory.
+# This removes the need for cleanup, and ensures anything the build does
+# doesn't muck with the user's sources.
+cp -a /netdata/packaging/repoconfig /usr/src || exit 1
+cd /usr/src/repoconfig || exit 1
+
+# The --pre-clean/--post-clean options were added after dpkg 1.18.8; checking the help output for them is simpler than parsing the version.
+if dpkg-buildpackage --help | grep "\-\-post\-clean" 2> /dev/null > /dev/null; then
+ dpkg-buildpackage --post-clean --pre-clean -b -us -uc || exit 1
+else
+ dpkg-buildpackage -b -us -uc || exit 1
+fi
+
+# Embed the distro info in the package name.
+# This is required to make the repo standards-compliant without PackageCloud's hacks.
+distid="${DISTNAME}${DISTVERS}"
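+# For example, on Debian 12 (ID=debian, VERSION_ID=12) netdata-repo_2-2_all.deb becomes netdata-repo_2-2+debian12_all.deb.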
+for pkg in /usr/src/*.deb; do
+ pkgname="$(basename "${pkg}" .deb)"
+ name="$(echo "${pkgname}" | cut -f 1 -d '_')"
+ version="$(echo "${pkgname}" | cut -f 2 -d '_')"
+ arch="$(echo "${pkgname}" | cut -f 3 -d '_')"
+
+ newname="$(dirname "${pkg}")/${name}_${version}+${distid}_${arch}.deb"
+ mv "${pkg}" "${newname}"
+done
+
+# Copy the built packages to /netdata/artifacts (which may be bind-mounted)
+# Also ensure /netdata/artifacts exists and create it if it doesn't
+[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts
+cp -a /usr/src/*.deb /netdata/artifacts/ || exit 1
+
+# Correct ownership of the artifacts.
+# Without this, the artifacts directory and its contents end up owned
+# by root instead of the local user on Linux boxes.
+chown -R --reference=/netdata /netdata/artifacts
diff --git a/packaging/repoconfig/build-rpm.sh b/packaging/repoconfig/build-rpm.sh
new file mode 100755
index 00000000..6c07c661
--- /dev/null
+++ b/packaging/repoconfig/build-rpm.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+prefix='/root/rpmbuild'
+
+if command -v dnf > /dev/null ; then
+ dnf distro-sync -y --nodocs || exit 1
+ dnf install -y --nodocs --setopt=install_weak_deps=False rpm-build || exit 1
+elif command -v yum > /dev/null ; then
+ yum distro-sync -y || exit 1
+ yum install -y rpm-build || exit 1
+elif command -v zypper > /dev/null ; then
+ zypper update -y || exit 1
+ zypper install -y rpm-build || exit 1
+ prefix="/usr/src/packages"
+fi
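+
+# zypper-based (SUSE) systems use /usr/src/packages as the rpmbuild top directory
+# instead of ~/rpmbuild, hence the prefix override above.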
+
+mkdir -p "${prefix}/BUILD" "${prefix}/RPMS" "${prefix}/SRPMS" "${prefix}/SPECS" "${prefix}/SOURCES" || exit 1
+cp -a /netdata/packaging/repoconfig/netdata-repo.spec "${prefix}/SPECS" || exit 1
+cp -a /netdata/packaging/repoconfig/* "${prefix}/SOURCES/" || exit 1
+
+rpmbuild -bb --rebuild "${prefix}/SPECS/netdata-repo.spec" || exit 1
+
+[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts
+find "${prefix}/RPMS/" -type f -name '*.rpm' -exec cp '{}' /netdata/artifacts \; || exit 1
+
+chown -R --reference=/netdata /netdata/artifacts
diff --git a/packaging/repoconfig/debian/changelog b/packaging/repoconfig/debian/changelog
new file mode 100644
index 00000000..d056fa43
--- /dev/null
+++ b/packaging/repoconfig/debian/changelog
@@ -0,0 +1,25 @@
+netdata-repo (2-2) unstable; urgency=medium
+
+ * Version bump to keep in sync with RPM repo packages
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 13 Nov 2023 11:15:00 -0500
+
+netdata-repo (2-1) unstable; urgency=medium
+
+ * Switched to new package hosting infrastructure
+ * Removed apt-transport-https requirement
+
+ -- Netdata Builder <bot@netdata.cloud> Wed, 18 Jan 2023 08:30:00 -0500
+
+netdata-repo (1-2) unstable; urgency=medium
+
+ * Fixed package file naming for repo layout compliance
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 6 Jun 2022 09:30:00 -0500
+
+netdata-repo (1-1) unstable; urgency=medium
+
+ * Initial Release
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 14 Jun 2021 08:00:00 -0500
+
diff --git a/packaging/repoconfig/debian/compat b/packaging/repoconfig/debian/compat
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/packaging/repoconfig/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/packaging/repoconfig/debian/control b/packaging/repoconfig/debian/control
new file mode 100644
index 00000000..fdea6a82
--- /dev/null
+++ b/packaging/repoconfig/debian/control
@@ -0,0 +1,19 @@
+Source: netdata-repo
+Section: net
+Priority: optional
+Maintainer: Netdata Builder <bot@netdata.cloud>
+Standards-Version: 3.9.6
+Build-Depends: debhelper (>= 9), curl, gnupg
+Homepage: https://netdata.cloud
+
+Package: netdata-repo
+Architecture: all
+Depends: debian-archive-keyring, gnupg
+Conflicts: netdata-repo-edge
+Description: Configuration for the official Netdata Stable package repository.
+
+Package: netdata-repo-edge
+Architecture: all
+Depends: debian-archive-keyring, gnupg
+Conflicts: netdata-repo
+Description: Configuration for the official Netdata Edge package repository.
diff --git a/packaging/repoconfig/debian/copyright b/packaging/repoconfig/debian/copyright
new file mode 100644
index 00000000..44b59693
--- /dev/null
+++ b/packaging/repoconfig/debian/copyright
@@ -0,0 +1,10 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: Netdata
+Upstream-Contact: Costa Tsaousis <costa@netdata.cloud>
+Source: https://github.com/netdata/netdata
+
+Files: *
+Copyright: 2021-2023 Netdata Inc.
+License: GPL-3+
+ On Debian systems, the complete text of the GNU General Public
+ License version 3 can be found in /usr/share/common-licenses/GPL-3.
diff --git a/packaging/repoconfig/debian/rules b/packaging/repoconfig/debian/rules
new file mode 100755
index 00000000..0151b96e
--- /dev/null
+++ b/packaging/repoconfig/debian/rules
@@ -0,0 +1,21 @@
+#!/usr/bin/make -f
+
+TOP = $(CURDIR)/debian/netdata-repo
+TOP_EDGE = $(CURDIR)/debian/netdata-repo-edge
+TEMPTOP = $(CURDIR)/debian/tmp
+
+%:
+ dh $@
+
+override_dh_configure:
+ true
+
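+# Stage the files into the two binary packages: the stable list and keyring go into
+# netdata-repo, the edge variants into netdata-repo-edge, and the shared repoconfig
+# keyring into both.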
+override_dh_install:
+ mkdir -p $(TOP)/etc/apt/sources.list.d $(TOP)/etc/apt/trusted.gpg.d/
+ mv -f $(TEMPTOP)/netdata.list $(TOP)/etc/apt/sources.list.d
+ mv -f $(TEMPTOP)/netdata-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d
+ cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d
+ mkdir -p $(TOP_EDGE)/etc/apt/sources.list.d $(TOP_EDGE)/etc/apt/trusted.gpg.d/
+ mv -f $(TEMPTOP)/netdata-edge.list $(TOP_EDGE)/etc/apt/sources.list.d
+ mv -f $(TEMPTOP)/netdata-edge-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d
+ cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d
diff --git a/packaging/repoconfig/debian/source/format b/packaging/repoconfig/debian/source/format
new file mode 100644
index 00000000..163aaf8d
--- /dev/null
+++ b/packaging/repoconfig/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/packaging/repoconfig/netdata-edge.repo.al b/packaging/repoconfig/netdata-edge.repo.al
new file mode 100644
index 00000000..4a300a26
--- /dev/null
+++ b/packaging/repoconfig/netdata-edge.repo.al
@@ -0,0 +1,21 @@
+[netdata-edge]
+name=Netdata Edge
+baseurl=https://repo.netdata.cloud/repos/edge/amazonlinux/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/amazonlinux/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
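
All of the yum/dnf repo definitions in this directory follow the same pattern: both repository and package GPG checks enabled, the netdatabot key as the signing key, and priority=50 so the repository ranks ahead of repositories left at the default priority of 99. A sketch of using one of these files manually, assuming it has been copied to /etc/yum.repos.d/netdata-edge.repo:

    rpm --import https://repo.netdata.cloud/netdatabot.gpg.key
    dnf makecache
    dnf install netdata
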
diff --git a/packaging/repoconfig/netdata-edge.repo.centos b/packaging/repoconfig/netdata-edge.repo.centos
new file mode 100644
index 00000000..fd96f0d7
--- /dev/null
+++ b/packaging/repoconfig/netdata-edge.repo.centos
@@ -0,0 +1,21 @@
+[netdata-edge]
+name=Netdata Edge
+baseurl=https://repo.netdata.cloud/repos/edge/el/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/el/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.fedora b/packaging/repoconfig/netdata-edge.repo.fedora
new file mode 100644
index 00000000..03b0e9c7
--- /dev/null
+++ b/packaging/repoconfig/netdata-edge.repo.fedora
@@ -0,0 +1,21 @@
+[netdata-edge]
+name=Netdata Edge
+baseurl=https://repo.netdata.cloud/repos/edge/fedora/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/fedora/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.ol b/packaging/repoconfig/netdata-edge.repo.ol
new file mode 100644
index 00000000..89f74e71
--- /dev/null
+++ b/packaging/repoconfig/netdata-edge.repo.ol
@@ -0,0 +1,21 @@
+[netdata-edge]
+name=Netdata Edge
+baseurl=https://repo.netdata.cloud/repos/edge/ol/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/ol/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.suse b/packaging/repoconfig/netdata-edge.repo.suse
new file mode 100644
index 00000000..f65bd08d
--- /dev/null
+++ b/packaging/repoconfig/netdata-edge.repo.suse
@@ -0,0 +1,19 @@
+[netdata-edge]
+name=Netdata Edge
+baseurl=https://repo.netdata.cloud/repos/edge/opensuse/$releasever/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
+
+[netdata-repoconfig]
+name=Netdata Repoconfig
+baseurl=https://repo.netdata.cloud/repos/repoconfig/opensuse/$releasever/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
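
The openSUSE variants differ from the yum/dnf ones: they use zypper's rpm-md repository type with autorefresh and pkg_gpgcheck instead of the priority setting. A sketch of enabling one manually, assuming the file has been copied to /etc/zypp/repos.d/:

    zypper --gpg-auto-import-keys refresh
    zypper install netdata
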
diff --git a/packaging/repoconfig/netdata-repo.spec b/packaging/repoconfig/netdata-repo.spec
new file mode 100644
index 00000000..242178ba
--- /dev/null
+++ b/packaging/repoconfig/netdata-repo.spec
@@ -0,0 +1,118 @@
+%{?rhel:%global centos_ver %rhel}
+
+Name: netdata-repo
+Version: 2
+Release: 2
+Summary: Netdata stable repositories configuration.
+
+Group: System Environment/Base
+License: GPLv2
+
+Source0: netdata.repo.fedora
+Source1: netdata-edge.repo.fedora
+Source2: netdata.repo.suse
+Source3: netdata-edge.repo.suse
+Source4: netdata.repo.centos
+Source5: netdata-edge.repo.centos
+Source6: netdata.repo.ol
+Source7: netdata-edge.repo.ol
+Source8: netdata.repo.al
+Source9: netdata-edge.repo.al
+
+BuildArch: noarch
+
+%if 0%{?centos_ver} && 0%{?centos_ver} < 8
+Requires: yum-plugin-priorities
+%endif
+
+%if 0%{?centos_ver} && 0%{!?amazon_linux:1} && 0%{!?oraclelinux:1}
+Requires: epel-release
+%endif
+
+# Overlapping file installs
+Conflicts: netdata-repo-edge
+
+%description
+This package contains the official Netdata package repository configuration for stable versions of Netdata.
+
+%prep
+%setup -q -c -T
+
+%if 0%{?fedora}
+install -pm 644 %{SOURCE0} ./netdata.repo
+install -pm 644 %{SOURCE1} ./netdata-edge.repo
+%endif
+
+%if 0%{?suse_version}
+install -pm 644 %{SOURCE2} ./netdata.repo
+install -pm 644 %{SOURCE3} ./netdata-edge.repo
+%endif
+
+%if 0%{?centos_ver}
+# Amazon Linux 2 looks like CentOS, but with extra macros.
+%if 0%{?amzn2}
+install -pm 644 %{SOURCE8} ./netdata.repo
+install -pm 644 %{SOURCE9} ./netdata-edge.repo
+%else
+install -pm 644 %{SOURCE4} ./netdata.repo
+install -pm 644 %{SOURCE5} ./netdata-edge.repo
+%endif
+%endif
+
+%if 0%{?oraclelinux}
+install -pm 644 %{SOURCE6} ./netdata.repo
+install -pm 644 %{SOURCE7} ./netdata-edge.repo
+%endif
+
+%build
+true
+
+%install
+rm -rf $RPM_BUILD_ROOT
+
+%if 0%{?suse_version}
+install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
+install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
+install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
+%else
+install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
+install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
+install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
+%endif
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%if 0%{?suse_version}
+%attr(644,root,root) /etc/zypp/repos.d/netdata.repo
+%else
+%attr(644,root,root) /etc/yum.repos.d/netdata.repo
+%endif
+
+%package edge
+Summary: Netdata nightly repositories configuration.
+Group: System Environment/Base
+
+# Overlapping file installs
+Conflicts: netdata-repo
+
+%description edge
+This package contains the official Netdata package repository configuration for nightly versions of Netdata.
+
+%files edge
+%if 0%{?suse_version}
+%attr(644,root,root) /etc/zypp/repos.d/netdata-edge.repo
+%else
+%attr(644,root,root) /etc/yum.repos.d/netdata-edge.repo
+%endif
+
+%changelog
+* Mon Nov 13 2023 Austin Hemmelgarn <austin@netdata.cloud> 2-2
+- Add EPEL requirement for RHEL packages.
+* Wed Dec 7 2022 Austin Hemmelgarn <austin@netdata.cloud> 2-1
+- Switch to new hosting at repo.netdata.cloud.
+* Mon Jun 6 2022 Austin Hemmelgarn <austin@netdata.cloud> 1-2
+- Bump release to keep in sync with DEB package.
+* Mon Jun 14 2021 Austin Hemmelgarn <austin@netdata.cloud> 1-1
+- Initial revision
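
A hedged sketch of building the repoconfig RPMs locally from this spec, assuming the .repo files listed as Source0 through Source9 sit in the current directory:

    rpmbuild -bb --define "_sourcedir $(pwd)" netdata-repo.spec
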
diff --git a/packaging/repoconfig/netdata.list.in b/packaging/repoconfig/netdata.list.in
new file mode 100644
index 00000000..a49dbd91
--- /dev/null
+++ b/packaging/repoconfig/netdata.list.in
@@ -0,0 +1,2 @@
+deb http://repo.netdata.cloud/repos/__VARIANT__/__DISTRO__/ __SUITE__/
+deb http://repo.netdata.cloud/repos/repoconfig/__DISTRO__/ __SUITE__/
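
The __VARIANT__, __DISTRO__ and __SUITE__ placeholders in this template are filled in at package build time; the script that does so is not part of this diff. A minimal sketch of the substitution, with example values chosen purely for illustration:

    sed -e 's/__VARIANT__/stable/' -e 's/__DISTRO__/debian/' -e 's/__SUITE__/bookworm/' \
        netdata.list.in > netdata.list
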
diff --git a/packaging/repoconfig/netdata.repo.al b/packaging/repoconfig/netdata.repo.al
new file mode 100644
index 00000000..0bacb3a1
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.al
@@ -0,0 +1,21 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/stable/amazonlinux/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/amazonlinux/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata.repo.centos b/packaging/repoconfig/netdata.repo.centos
new file mode 100644
index 00000000..221e6451
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.centos
@@ -0,0 +1,21 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/stable/el/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/el/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata.repo.fedora b/packaging/repoconfig/netdata.repo.fedora
new file mode 100644
index 00000000..e13262ac
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.fedora
@@ -0,0 +1,21 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/stable/fedora/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/fedora/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata.repo.ol b/packaging/repoconfig/netdata.repo.ol
new file mode 100644
index 00000000..0488670d
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.ol
@@ -0,0 +1,21 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/stable/ol/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/ol/$releasever/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+priority=50
diff --git a/packaging/repoconfig/netdata.repo.suse b/packaging/repoconfig/netdata.repo.suse
new file mode 100644
index 00000000..8204d8d4
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.suse
@@ -0,0 +1,19 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/stable/opensuse/$releasever/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
+
+[netdata-repoconfig]
+name=Netdata Repoconfig
+baseurl=https://repo.netdata.cloud/repos/repoconfig/opensuse/$releasever/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
diff --git a/packaging/version b/packaging/version
new file mode 100644
index 00000000..0ade2bb9
--- /dev/null
+++ b/packaging/version
@@ -0,0 +1 @@
+v1.44.3
diff --git a/packaging/yaml.checksums b/packaging/yaml.checksums
new file mode 100644
index 00000000..563c273d
--- /dev/null
+++ b/packaging/yaml.checksums
@@ -0,0 +1 @@
+c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4 yaml-0.2.5.tar.gz
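
A sketch of verifying a downloaded libyaml tarball against this checksum file:

    sha256sum yaml-0.2.5.tar.gz   # output should match the hash recorded in packaging/yaml.checksums
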
diff --git a/packaging/yaml.version b/packaging/yaml.version
new file mode 100644
index 00000000..3a4036fb
--- /dev/null
+++ b/packaging/yaml.version
@@ -0,0 +1 @@
+0.2.5