Diffstat (limited to 'packaging')
-rw-r--r--  packaging/PLATFORM_SUPPORT.md | 36
-rw-r--r--  packaging/VERSIONING_AND_PUBLIC_API.md | 2
-rwxr-xr-x  packaging/build-package.sh | 99
-rwxr-xr-x  packaging/bundle-ebpf-co-re.sh | 9
-rwxr-xr-x  packaging/bundle-ebpf.sh | 20
-rwxr-xr-x  packaging/bundle-libbpf.sh | 27
-rwxr-xr-x  packaging/bundle-protobuf.sh | 16
-rw-r--r--  packaging/check-for-go-toolchain.sh | 176
-rw-r--r--  packaging/cmake/Modules/FindGo.cmake | 68
-rw-r--r--  packaging/cmake/Modules/NetdataCompilerFlags.cmake | 102
-rw-r--r--  packaging/cmake/Modules/NetdataDetectSystemd.cmake | 42
-rw-r--r--  packaging/cmake/Modules/NetdataEBPFCORE.cmake | 27
-rw-r--r--  packaging/cmake/Modules/NetdataEBPFLegacy.cmake | 48
-rw-r--r--  packaging/cmake/Modules/NetdataFetchContentExtra.cmake | 54
-rw-r--r--  packaging/cmake/Modules/NetdataGoTools.cmake | 85
-rw-r--r--  packaging/cmake/Modules/NetdataJSONC.cmake | 120
-rw-r--r--  packaging/cmake/Modules/NetdataLibBPF.cmake | 102
-rw-r--r--  packaging/cmake/Modules/NetdataProtobuf.cmake | 199
-rw-r--r--  packaging/cmake/Modules/NetdataSentry.cmake | 30
-rw-r--r--  packaging/cmake/Modules/NetdataUtil.cmake | 146
-rw-r--r--  packaging/cmake/Modules/NetdataVersion.cmake | 51
-rw-r--r--  packaging/cmake/Modules/NetdataYAML.cmake | 82
-rw-r--r--  packaging/cmake/Modules/Packaging.cmake | 479
-rw-r--r--  packaging/cmake/config.cmake.h.in | 194
-rw-r--r--  packaging/cmake/pkg-files/copyright (renamed from contrib/debian/copyright) | 0
-rwxr-xr-x  packaging/cmake/pkg-files/deb/ebpf-code-legacy/postinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/ebpf-code-legacy/preinst | 11
-rw-r--r--  packaging/cmake/pkg-files/deb/netdata/conffiles (renamed from contrib/debian/conffiles) | 0
-rw-r--r--  packaging/cmake/pkg-files/deb/netdata/etc/default/netdata (renamed from contrib/debian/netdata.default) | 0
-rwxr-xr-x  packaging/cmake/pkg-files/deb/netdata/etc/init.d/netdata (renamed from contrib/debian/netdata.init) | 0
-rwxr-xr-x  packaging/cmake/pkg-files/deb/netdata/postinst | 53
-rwxr-xr-x  packaging/cmake/pkg-files/deb/netdata/postrm | 55
-rwxr-xr-x  packaging/cmake/pkg-files/deb/netdata/preinst | 26
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-apps/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-apps/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-chartsd/postinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-chartsd/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-cups/postinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-cups/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-debugfs/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-debugfs/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-ebpf/postinst | 12
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-ebpf/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-freeipmi/postinst | 12
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-freeipmi/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-go/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-go/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-network-viewer/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-network-viewer/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-nfacct/postinst | 12
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-nfacct/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-perf/postinst | 18
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-perf/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-pythond/postinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-pythond/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-slabinfo/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-slabinfo/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-systemd-journal/postinst | 15
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-systemd-journal/preinst | 11
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-xenstat/postinst | 12
-rwxr-xr-x  packaging/cmake/pkg-files/deb/plugin-xenstat/preinst | 11
-rw-r--r--  packaging/current_libbpf.checksums | 1
-rw-r--r--  packaging/current_libbpf.version | 1
-rw-r--r--  packaging/dag/README.md | 23
-rw-r--r--  packaging/dag/build_command.py | 65
-rw-r--r--  packaging/dag/files/child_stream.conf | 10
-rw-r--r--  packaging/dag/files/cmake-aarch64.sha256 | 1
-rw-r--r--  packaging/dag/files/cmake-x86_64.sha256 | 1
-rw-r--r--  packaging/dag/files/ol8-epel.repo | 6
-rw-r--r--  packaging/dag/files/ol9-epel.repo | 6
-rw-r--r--  packaging/dag/files/parent_stream.conf | 7
-rw-r--r--  packaging/dag/imageutils.py | 1580
-rwxr-xr-x  packaging/dag/main.py | 18
-rw-r--r--  packaging/dag/nd.py | 406
-rw-r--r--  packaging/dag/requirements.txt | 3
-rw-r--r--  packaging/dag/test_command.py | 128
-rw-r--r--  packaging/docker/Dockerfile | 4
-rw-r--r--  packaging/docker/README.md | 140
-rwxr-xr-x  packaging/docker/run.sh | 111
-rw-r--r--  packaging/ebpf-co-re.checksums | 1
-rw-r--r--  packaging/ebpf-co-re.version | 1
-rw-r--r--  packaging/ebpf.checksums | 3
-rw-r--r--  packaging/ebpf.version | 1
-rw-r--r--  packaging/go.d.checksums | 20
-rw-r--r--  packaging/go.d.version | 1
-rw-r--r--  packaging/installer/README.md | 182
-rw-r--r--  packaging/installer/REINSTALL.md | 17
-rw-r--r--  packaging/installer/UNINSTALL.md | 6
-rw-r--r--  packaging/installer/UPDATE.md | 22
-rwxr-xr-x  packaging/installer/dependencies/alpine.sh | 29
-rwxr-xr-x  packaging/installer/dependencies/arch.sh | 31
-rwxr-xr-x  packaging/installer/dependencies/centos.sh | 7
-rwxr-xr-x  packaging/installer/dependencies/debian.sh | 8
-rwxr-xr-x  packaging/installer/dependencies/fedora.sh | 14
-rwxr-xr-x  packaging/installer/dependencies/freebsd.sh | 19
-rwxr-xr-x  packaging/installer/dependencies/gentoo.sh | 34
-rwxr-xr-x  packaging/installer/dependencies/ol.sh | 7
-rwxr-xr-x  packaging/installer/dependencies/opensuse.sh | 7
-rwxr-xr-x  packaging/installer/dependencies/rockylinux.sh | 7
-rwxr-xr-x  packaging/installer/dependencies/ubuntu.sh | 7
-rw-r--r--  packaging/installer/functions.sh | 344
-rwxr-xr-x  packaging/installer/install-required-packages.sh | 95
-rw-r--r--  packaging/installer/installer.nsi | 128
-rwxr-xr-x  packaging/installer/kickstart.sh | 361
-rw-r--r--  packaging/installer/methods/ansible.md | 10
-rw-r--r--  packaging/installer/methods/aws.md | 14
-rw-r--r--  packaging/installer/methods/azure.md | 14
-rw-r--r--  packaging/installer/methods/freebsd.md | 12
-rw-r--r--  packaging/installer/methods/gcp.md | 14
-rw-r--r--  packaging/installer/methods/kickstart.md | 167
-rw-r--r--  packaging/installer/methods/kubernetes.md | 10
-rw-r--r--  packaging/installer/methods/macos.md | 28
-rw-r--r--  packaging/installer/methods/manual.md | 18
-rw-r--r--  packaging/installer/methods/methods.md | 12
-rw-r--r--  packaging/installer/methods/offline.md | 6
-rw-r--r--  packaging/installer/methods/packages.md | 23
-rw-r--r--  packaging/installer/methods/source.md | 162
-rw-r--r--  packaging/installer/methods/synology.md | 27
-rw-r--r--  packaging/installer/methods/systems.md | 4
-rwxr-xr-x  packaging/installer/netdata-uninstaller.sh | 27
-rwxr-xr-x  packaging/installer/netdata-updater.sh | 243
-rwxr-xr-x  packaging/installer/package-windows.sh | 43
-rw-r--r--  packaging/jsonc.checksums | 1
-rw-r--r--  packaging/jsonc.version | 1
-rw-r--r--  packaging/libbpf_0_0_9.checksums | 1
-rw-r--r--  packaging/libbpf_0_0_9.version | 1
-rw-r--r--  packaging/maintainers/README.md | 3
-rw-r--r--  packaging/makeself/README.md | 2
-rwxr-xr-x  packaging/makeself/build-static.sh | 20
-rwxr-xr-x  packaging/makeself/build.sh | 5
-rw-r--r--  packaging/makeself/bundled-packages.version (renamed from packaging/makeself/bundled-packages) | 0
-rwxr-xr-x  packaging/makeself/functions.sh | 2
-rwxr-xr-x  packaging/makeself/install-or-update.sh | 102
-rwxr-xr-x  packaging/makeself/jobs/20-openssl.install.sh | 2
-rwxr-xr-x  packaging/makeself/jobs/50-bash-5.1.16.install.sh | 2
-rwxr-xr-x  packaging/makeself/jobs/50-curl.install.sh | 2
-rwxr-xr-x  packaging/makeself/jobs/50-ioping-1.3.install.sh | 2
-rwxr-xr-x  packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh | 2
-rwxr-xr-x  packaging/makeself/jobs/70-netdata-git.install.sh | 3
-rwxr-xr-x  packaging/makeself/jobs/90-netdata-runtime-check.sh | 44
-rwxr-xr-x  packaging/makeself/run-all-jobs.sh | 2
-rwxr-xr-x  packaging/makeself/uname2platform.sh | 1
-rw-r--r--  packaging/protobuf.checksums | 1
-rw-r--r--  packaging/protobuf.version | 1
-rw-r--r--  packaging/repoconfig/CMakeLists.txt | 250
-rw-r--r--  packaging/repoconfig/Makefile | 35
-rwxr-xr-x  packaging/repoconfig/build-deb.sh | 55
-rwxr-xr-x  packaging/repoconfig/build-rpm.sh | 44
-rw-r--r--  packaging/repoconfig/deb.changelog | 49
-rw-r--r--  packaging/repoconfig/debian/changelog | 25
-rw-r--r--  packaging/repoconfig/debian/compat | 1
-rw-r--r--  packaging/repoconfig/debian/control | 19
-rw-r--r--  packaging/repoconfig/debian/copyright | 10
-rwxr-xr-x  packaging/repoconfig/debian/rules | 21
-rw-r--r--  packaging/repoconfig/debian/source/format | 1
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.al | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.centos | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.fedora | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.ol | 21
-rw-r--r--  packaging/repoconfig/netdata-edge.repo.suse | 19
-rw-r--r--  packaging/repoconfig/netdata-repo.spec | 118
-rw-r--r--  packaging/repoconfig/netdata.list.in | 2
-rw-r--r--  packaging/repoconfig/netdata.repo.al | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.centos | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.dnf | 19
-rw-r--r--  packaging/repoconfig/netdata.repo.fedora | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.ol | 21
-rw-r--r--  packaging/repoconfig/netdata.repo.suse | 19
-rw-r--r--  packaging/repoconfig/netdata.repo.zypp | 19
-rw-r--r--  packaging/repoconfig/netdata.sources.in | 15
-rw-r--r--  packaging/repoconfig/rpm.changelog | 18
-rwxr-xr-x  packaging/runtime-check.sh | 89
-rwxr-xr-x  packaging/utils/coverity-scan.sh | 214
-rw-r--r--  packaging/utils/find-dll-deps.sh | 16
-rw-r--r--  packaging/version | 2
-rw-r--r--  packaging/windows/NetdataWhite.ico | bin 0 -> 15134 bytes
-rwxr-xr-x  packaging/windows/bash_execute.sh | 19
-rw-r--r--  packaging/windows/build.ps1 | 16
-rw-r--r--  packaging/windows/clion-msys-mingw64-environment.bat | 17
-rw-r--r--  packaging/windows/clion-msys-msys-environment.bat | 20
-rwxr-xr-x  packaging/windows/compile-on-windows.sh | 60
-rwxr-xr-x  packaging/windows/fetch-msys2-installer.py | 101
-rw-r--r--  packaging/windows/functions.ps1 | 31
-rw-r--r--  packaging/windows/install-dependencies.ps1 | 84
-rw-r--r--  packaging/windows/installer.nsi | 186
-rw-r--r--  packaging/windows/invoke-msys2.ps1 | 16
-rwxr-xr-x  packaging/windows/msys2-dependencies.sh | 50
-rwxr-xr-x  packaging/windows/package-windows.sh | 51
-rw-r--r--  packaging/windows/package.ps1 | 16
-rw-r--r--  packaging/windows/protoc.bat | 9
-rw-r--r--  packaging/windows/win-build-dir.sh | 20
-rw-r--r--  packaging/windows/windows-openssh-to-msys.bat | 118
-rw-r--r--  packaging/yaml.checksums | 1
-rw-r--r--  packaging/yaml.version | 1
194 files changed, 8014 insertions, 1640 deletions
diff --git a/packaging/PLATFORM_SUPPORT.md b/packaging/PLATFORM_SUPPORT.md
index 5448e5da4..ad1cd4168 100644
--- a/packaging/PLATFORM_SUPPORT.md
+++ b/packaging/PLATFORM_SUPPORT.md
@@ -61,25 +61,22 @@ to work on these platforms with minimal user effort.
| Alpine Linux | 3.18 | No | The latest release of Alpine Linux is guaranteed to remain at **Core** tier due to usage for our Docker images |
| Alma Linux | 9.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives |
| Alma Linux | 8.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives |
-| Amazon Linux | 2023 | x86\_64, AArch64 | |
-| Amazon Linux | 2 | x86\_64, AArch64 | |
+| Amazon Linux | 2023 | x86\_64, AArch64 | |
+| Amazon Linux | 2 | x86\_64, AArch64 | |
| CentOS | 7.x | x86\_64 | |
-| Docker | 19.03 or newer | x86\_64, i386, ARMv7, AArch64, POWER8+ | See our [Docker documentation](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md) for more info on using Netdata on Docker |
+| Docker | 19.03 or newer | x86\_64, i386, ARMv7, AArch64, POWER8+ | See our [Docker documentation](/packaging/docker/README.md) for more info on using Netdata on Docker |
| Debian | 12.x | x86\_64, i386, ARMv7, AArch64 | |
| Debian | 11.x | x86\_64, i386, ARMv7, AArch64 | |
-| Debian | 10.x | x86\_64, i386, ARMv7, AArch64 | |
+| Fedora | 40 | x86\_64, AArch64 | |
| Fedora | 39 | x86\_64, AArch64 | |
-| Fedora | 38 | x86\_64, AArch64 | |
-| Fedora | 37 | x86\_64, AArch64 | |
-| openSUSE | Leap 15.4 | x86\_64, AArch64 | |
| openSUSE | Leap 15.5 | x86\_64, AArch64 | |
+| openSUSE | Leap 15.4 | x86\_64, AArch64 | |
| Oracle Linux | 9.x | x86\_64, AArch64 | |
| Oracle Linux | 8.x | x86\_64, AArch64 | |
| Red Hat Enterprise Linux | 9.x | x86\_64, AArch64 | |
| Red Hat Enterprise Linux | 8.x | x86\_64, AArch64 | |
| Red Hat Enterprise Linux | 7.x | x86\_64 | |
-| Ubuntu | 23.10 | x86\_64, AArch64, ARMv7 | |
-| Ubuntu | 23.04 | x86\_64, AArch64, ARMv7 | |
+| Ubuntu | 24.04 | x86\_64, AArch64, ARMv7 | |
| Ubuntu | 22.04 | x86\_64, ARMv7, AArch64 | |
| Ubuntu | 20.04 | x86\_64, ARMv7, AArch64 | |
@@ -97,7 +94,6 @@ with minimal user effort.
|---------------|------------|--------------------------|------------------------------------------------------------------------------------------------------|
| Alpine Linux | Edge | No | |
| Alpine Linux | 3.17 | No | |
-| Alpine Linux | 3.16 | No | |
| Arch Linux | Latest | No | We officially recommend the community packages available for Arch Linux |
| Manjaro Linux | Latest | No | We officially recommend the community packages available for Arch Linux |
| openSUSE | Tumbleweed | x86\_64, AArch64 | Scheduled for promotion to Core tier at some point after the release of v1.41.0 of the Netdata Agent |
@@ -156,14 +152,17 @@ This is a list of platforms that we have supported in the recent past but no lon
| Platform | Version | Notes |
|--------------|-----------|----------------------|
+| Alpine Linux | 3.16 | EOL as of 2024-05-23 |
+| Alpine Linux | 3.15 | EOL as of 2023-11-01 |
| Alpine Linux | 3.14 | EOL as of 2023-05-01 |
-| Alpine Linux | 3.13 | EOL as of 2022-11-01 |
-| Debian | 9.x | EOL as of 2022-06-30 |
-| Fedora | 36 | EOL as of 2023-05-18 |
-| Fedora | 35 | EOL as of 2022-12-13 |
+| Debian | 10.x | EOL as of 2024-07-01 |
+| Fedora | 38 | EOL as of 2024-05-14 |
+| Fedora | 37 | EOL as of 2023-12-05 |
+| openSUSE | Leap 15.4 | EOL as of 2023-12-07 |
| openSUSE | Leap 15.3 | EOL as of 2022-12-01 |
+| Ubuntu | 23.10 | EOL as of 2024-07-01 |
+| Ubuntu | 23.04 | EOL as of 2024-01-20 |
| Ubuntu | 22.10 | EOL as of 2023-07-20 |
-| Ubuntu | 21.10 | EOL as of 2022-07-31 |
| Ubuntu | 18.04 | EOL as of 2023-04-02 |
## Static builds
@@ -175,9 +174,9 @@ means that they generally do not support non-local username mappings or exotic n
We currently provide static builds for the following CPU architectures:
-- 32-bit x86
- 64-bit x86
- ARMv7
+- ARMv6
- AArch64
- POWER8+
@@ -187,3 +186,8 @@ We currently provide static builds for the following CPU architectures:
Our IPMI collector is based on FreeIPMI. Due to upstream limitations in FreeIPMI, we are unable to support our
IPMI collector on POWER-based hardware.
+
+### Systemd
+
+Many of our systemd integrations are not supported in our static builds. This is due to a general refusal by the
+systemd developers to support static linking (or any C runtime other than glibc), and is not something we can resolve.
diff --git a/packaging/VERSIONING_AND_PUBLIC_API.md b/packaging/VERSIONING_AND_PUBLIC_API.md
index 79c537851..dc0a5def5 100644
--- a/packaging/VERSIONING_AND_PUBLIC_API.md
+++ b/packaging/VERSIONING_AND_PUBLIC_API.md
@@ -121,7 +121,7 @@ The following special exceptions to the public API exist:
guaranteed to be supported for in-place updates for at least two minor versions after the change happens. The
new format is not guaranteed to be backwards compatible.
- The list of supported platforms is functionally a part of the public API, but our existing [platform support
- policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md) dictates when and how
+ policy](/packaging/PLATFORM_SUPPORT.md) dictates when and how
support for specific platforms is added or removed.
- The list of components provided as separate packages in our official native packages is considered part of our
strictly defined public API, but changes to our packaging that do not alter the functionality of existing installs
diff --git a/packaging/build-package.sh b/packaging/build-package.sh
new file mode 100755
index 000000000..453e167f4
--- /dev/null
+++ b/packaging/build-package.sh
@@ -0,0 +1,99 @@
+#!/bin/sh
+#
+# Invoked by the package builder images to actually build native packages.
+
+set -e
+
+PKG_TYPE="${1}"
+BUILD_DIR="${2}"
+SCRIPT_SOURCE="$(
+ self=${0}
+ while [ -L "${self}" ]
+ do
+ cd "${self%/*}" || exit 1
+ self=$(readlink "${self}")
+ done
+ cd "${self%/*}" || exit 1
+ echo "$(pwd -P)/${self##*/}"
+)"
+SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_SOURCE}")")"
+
+CMAKE_ARGS="-S ${SOURCE_DIR} -B ${BUILD_DIR}"
+
+add_cmake_option() {
+ CMAKE_ARGS="${CMAKE_ARGS} -D${1}=${2}"
+}
+
+add_cmake_option CMAKE_BUILD_TYPE RelWithDebInfo
+add_cmake_option CMAKE_INSTALL_PREFIX /
+add_cmake_option ENABLE_ACLK On
+add_cmake_option ENABLE_CLOUD On
+add_cmake_option ENABLE_DBENGINE On
+add_cmake_option ENABLE_H2O On
+add_cmake_option ENABLE_ML On
+
+add_cmake_option ENABLE_PLUGIN_APPS On
+add_cmake_option ENABLE_PLUGIN_CGROUP_NETWORK On
+add_cmake_option ENABLE_PLUGIN_DEBUGFS On
+add_cmake_option ENABLE_PLUGIN_FREEIPMI On
+add_cmake_option ENABLE_PLUGIN_GO On
+add_cmake_option ENABLE_PLUGIN_LOCAL_LISTENERS On
+add_cmake_option ENABLE_PLUGIN_NFACCT On
+add_cmake_option ENABLE_PLUGIN_PERF On
+add_cmake_option ENABLE_PLUGIN_SLABINFO On
+add_cmake_option ENABLE_PLUGIN_SYSTEMD_JOURNAL On
+
+add_cmake_option ENABLE_EXPORTER_PROMETHEUS_REMOTE_WRITE On
+add_cmake_option ENABLE_EXPORTER_MONGODB On
+
+add_cmake_option ENABLE_BUNDLED_PROTOBUF Off
+add_cmake_option ENABLE_BUNDLED_JSONC Off
+add_cmake_option ENABLE_BUNDLED_YAML Off
+
+add_cmake_option BUILD_FOR_PACKAGING On
+
+case "${PKG_TYPE}" in
+ DEB)
+ case "$(dpkg-architecture -q DEB_TARGET_ARCH)" in
+ amd64)
+ add_cmake_option ENABLE_PLUGIN_XENSTAT On
+ add_cmake_option ENABLE_PLUGIN_EBPF On
+ ;;
+ arm64)
+ add_cmake_option ENABLE_PLUGIN_XENSTAT On
+ add_cmake_option ENABLE_PLUGIN_EBPF Off
+ ;;
+ *)
+ add_cmake_option ENABLE_PLUGIN_XENSTAT Off
+ add_cmake_option ENABLE_PLUGIN_EBPF Off
+ ;;
+ esac
+ ;;
+ RPM) ;;
+ *) echo "Unrecognized package type ${PKG_TYPE}." ; exit 1 ;;
+esac
+
+if [ "${ENABLE_SENTRY}" = "true" ]; then
+ if [ -z "${SENTRY_DSN}" ]; then
+ echo "ERROR: Sentry enabled but no DSN specified, exiting."
+ exit 1
+ fi
+
+ add_cmake_option ENABLE_SENTRY On
+ add_cmake_option NETDATA_SENTRY_ENVIRONMENT "${RELEASE_PIPELINE:-Unknown}"
+ add_cmake_option NETDATA_SENTRY_DIST "${BUILD_DESTINATION:-Unknown}"
+ add_cmake_option NETDATA_SENTRY_DSN "${SENTRY_DSN}"
+else
+ add_cmake_option ENABLE_SENTRY Off
+fi
+
+# shellcheck disable=SC2086
+cmake ${CMAKE_ARGS} -G Ninja
+cmake --build "${BUILD_DIR}" --parallel "$(nproc)" -- -k 1
+
+if [ "${ENABLE_SENTRY}" = "true" ] && [ "${UPLOAD_SENTRY}" = "true" ]; then
+ sentry-cli debug-files upload -o netdata-inc -p netdata-agent --force-foreground --log-level=debug --wait --include-sources build/netdata
+fi
+
+cd "${BUILD_DIR}" || exit 1
+cpack -V -G "${PKG_TYPE}"
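
The new build-package.sh takes the package format (DEB or RPM) and a build directory as its two
positional arguments, and reads the Sentry-related settings from the environment. A minimal sketch
of how a package-builder container might invoke it; the container paths and environment values
below are illustrative assumptions, not taken from the repository:

    # Configure, build, and produce a .deb tree under /netdata/build.
    ENABLE_SENTRY=false /netdata/packaging/build-package.sh DEB /netdata/build
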
diff --git a/packaging/bundle-ebpf-co-re.sh b/packaging/bundle-ebpf-co-re.sh
deleted file mode 100755
index 460709b67..000000000
--- a/packaging/bundle-ebpf-co-re.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-SRCDIR="${1}"
-
-CORE_VERSION="$(cat "${SRCDIR}/packaging/ebpf-co-re.version")"
-CORE_TARBALL="netdata-ebpf-co-re-glibc-${CORE_VERSION}.tar.xz"
-curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/ebpf-co-re/releases/download/${CORE_VERSION}/${CORE_TARBALL}" > "${CORE_TARBALL}" || exit 1
-grep "${CORE_TARBALL}" "${SRCDIR}/packaging/ebpf-co-re.checksums" | sha256sum -c - || exit 1
-tar -xa --no-same-owner -f "${CORE_TARBALL}" -C "${SRCDIR}/collectors/ebpf.plugin" || exit 1
diff --git a/packaging/bundle-ebpf.sh b/packaging/bundle-ebpf.sh
deleted file mode 100755
index 11930671f..000000000
--- a/packaging/bundle-ebpf.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-SRCDIR="${1}"
-PLUGINDIR="${2}"
-FORCE="${3}"
-
-EBPF_VERSION="$(cat "${SRCDIR}/packaging/ebpf.version")"
-EBPF_TARBALL="netdata-kernel-collector-glibc-${EBPF_VERSION}.tar.xz"
-
-if [ -x "${PLUGINDIR}/ebpf.plugin" ] || [ "${FORCE}" = "force" ]; then
- mkdir -p "${SRCDIR}/tmp/ebpf"
- curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1
- grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1
- tar -xva --no-same-owner -f "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1
- if [ ! -d "${PLUGINDIR}/ebpf.d" ];then
- mkdir "${PLUGINDIR}/ebpf.d"
- fi
- # shellcheck disable=SC2046
- cp -r $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d"
-fi
diff --git a/packaging/bundle-libbpf.sh b/packaging/bundle-libbpf.sh
deleted file mode 100755
index 52f7cf45e..000000000
--- a/packaging/bundle-libbpf.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-if [ "$(uname -m)" = x86_64 ]; then
- lib_subdir="lib64"
-else
- lib_subdir="lib"
-fi
-
-if [ "${2}" != "centos7" ]; then
- cp "${1}/packaging/current_libbpf.checksums" "${1}/packaging/libbpf.checksums"
- cp "${1}/packaging/current_libbpf.version" "${1}/packaging/libbpf.version"
-else
- cp "${1}/packaging/libbpf_0_0_9.checksums" "${1}/packaging/libbpf.checksums"
- cp "${1}/packaging/libbpf_0_0_9.version" "${1}/packaging/libbpf.version"
-fi
-
-LIBBPF_TARBALL="v$(cat "${1}/packaging/libbpf.version").tar.gz"
-LIBBPF_BUILD_PATH="${1}/externaldeps/libbpf/libbpf-$(cat "${1}/packaging/libbpf.version")"
-
-mkdir -p "${1}/externaldeps/libbpf" || exit 1
-curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libbpf/archive/${LIBBPF_TARBALL}" > "${LIBBPF_TARBALL}" || exit 1
-sha256sum -c "${1}/packaging/libbpf.checksums" || exit 1
-tar -xz --no-same-owner -f "${LIBBPF_TARBALL}" -C "${1}/externaldeps/libbpf" || exit 1
-make -C "${LIBBPF_BUILD_PATH}/src" BUILD_STATIC_ONLY=1 OBJDIR=build/ DESTDIR=../ install || exit 1
-cp -r "${LIBBPF_BUILD_PATH}/usr/${lib_subdir}/libbpf.a" "${1}/externaldeps/libbpf" || exit 1
-cp -r "${LIBBPF_BUILD_PATH}/usr/include" "${1}/externaldeps/libbpf" || exit 1
-cp -r "${LIBBPF_BUILD_PATH}/include/uapi" "${1}/externaldeps/libbpf/include" || exit 1
diff --git a/packaging/bundle-protobuf.sh b/packaging/bundle-protobuf.sh
deleted file mode 100755
index d715dfe3d..000000000
--- a/packaging/bundle-protobuf.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-PROTOBUF_TARBALL="protobuf-cpp-$(cat "${1}/packaging/protobuf.version").tar.gz"
-PROTOBUF_BUILD_PATH="${1}/externaldeps/protobuf/protobuf-$(cat "${1}/packaging/protobuf.version")"
-
-mkdir -p "${1}/externaldeps/protobuf" || exit 1
-curl -sSL --connect-timeout 10 --retry 3 "https://github.com/protocolbuffers/protobuf/releases/download/v$(cat "${1}/packaging/protobuf.version")/${PROTOBUF_TARBALL}" > "${PROTOBUF_TARBALL}" || exit 1
-sha256sum -c "${1}/packaging/protobuf.checksums" || exit 1
-tar -xz --no-same-owner -f "${PROTOBUF_TARBALL}" -C "${1}/externaldeps/protobuf" || exit 1
-OLDPWD="${PWD}"
-cd "${PROTOBUF_BUILD_PATH}" || exit 1
-./configure --disable-shared --without-zlib --disable-dependency-tracking --with-pic || exit 1
-make -j "$(nproc)" || exit 1
-cd "${OLDPWD}" || exit 1
-
-cp -a "${PROTOBUF_BUILD_PATH}/src" "${1}/externaldeps/protobuf" || exit 1
diff --git a/packaging/check-for-go-toolchain.sh b/packaging/check-for-go-toolchain.sh
new file mode 100644
index 000000000..fe5dabfad
--- /dev/null
+++ b/packaging/check-for-go-toolchain.sh
@@ -0,0 +1,176 @@
+#!/bin/sh
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-v3+
+#
+# Check if we need to install a Go toolchain.
+#
+# Scripts that use this should call the ensure_go_toolchain function
+# after sourcing this file to handle things correctly.
+#
+# If a working Go toolchain is either present or was installed, then the
+# function will return 0. If a working Go toolchain is not present and one
+# cannot be installed, then it will instead return 1, with the variable
+# GOLANG_FAILURE_REASON set to an error message indicating what went wrong.
+
+GOLANG_MIN_MAJOR_VERSION='1'
+GOLANG_MIN_MINOR_VERSION='22'
+GOLANG_MIN_PATCH_VERSION='0'
+GOLANG_MIN_VERSION="${GOLANG_MIN_MAJOR_VERSION}.${GOLANG_MIN_MINOR_VERSION}.${GOLANG_MIN_PATCH_VERSION}"
+
+GOLANG_TEMP_PATH="${TMPDIR}/go-toolchain"
+
+check_go_version() {
+ version="$("${go}" version | awk '{ print $3 }' | sed 's/^go//')"
+ version_major="$(echo "${version}" | cut -f 1 -d '.')"
+ version_minor="$(echo "${version}" | cut -f 2 -d '.')"
+ version_patch="$(echo "${version}" | cut -f 3 -d '.')"
+
+ if [ -z "${version_major}" ] || [ "${version_major}" -lt "${GOLANG_MIN_MAJOR_VERSION}" ]; then
+ return 1
+ elif [ "${version_major}" -gt "${GOLANG_MIN_MAJOR_VERSION}" ]; then
+ return 0
+ fi
+
+ if [ -z "${version_minor}" ] || [ "${version_minor}" -lt "${GOLANG_MIN_MINOR_VERSION}" ]; then
+ return 1
+ elif [ "${version_minor}" -gt "${GOLANG_MIN_MINOR_VERSION}" ]; then
+ return 0
+ fi
+
+ if [ -n "${version_patch}" ] && [ "${version_patch}" -ge "${GOLANG_MIN_PATCH_VERSION}" ]; then
+ return 0
+ fi
+
+ return 1
+}
+
+install_go_toolchain() {
+ GOLANG_ARCHIVE_NAME="${GOLANG_TEMP_PATH}/golang.tar.gz"
+ GOLANG_CHECKSUM_FILE="${GOLANG_TEMP_PATH}/golang.sha256sums"
+
+ case "$(uname -s)" in
+ Linux)
+ case "$(uname -m)" in
+ i?86)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-386.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="1e209c4abde069067ac9afb341c8003db6a210f8173c77777f02d3a524313da3"
+ ;;
+ x86_64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-amd64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="f6c8a87aa03b92c4b0bf3d558e28ea03006eb29db78917daec5cfb6ec1046265"
+ ;;
+ aarch64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-arm64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="6a63fef0e050146f275bf02a0896badfe77c11b6f05499bb647e7bd613a45a10"
+ ;;
+ armv*)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-armv6l.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="0525f92f79df7ed5877147bce7b955f159f3962711b69faac66bc7121d36dcc4"
+ ;;
+ ppc64le)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-ppc64le.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="0e57f421df9449066f00155ce98a5be93744b3d81b00ee4c2c9b511be2a31d93"
+ ;;
+ riscv64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-riscv64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="afe9cedcdbd6fdff27c57efd30aa5ce0f666f471fed5fa96cd4fb38d6b577086"
+ ;;
+ s390x)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-s390x.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="2e546a3583ba7bd3988f8f476245698f6a93dfa9fe206a8ca8f85c1ceecb2446"
+ ;;
+ *)
+ GOLANG_FAILURE_REASON="Linux $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself."
+ return 1
+ ;;
+ esac
+ ;;
+ FreeBSD)
+ case "$(uname -m)" in
+ 386)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-386.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="b8065da37783e8b9e7086365a54d74537e832c92311b61101a66989ab2458d8e"
+ ;;
+ amd64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-amd64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="50f421c7f217083ac94aab1e09400cb9c2fea7d337679ec11f1638a11460da30"
+ ;;
+ arm)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-arm.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="c9c8b305f90903536f4981bad9f029828c2483b3216ca1783777344fbe603f2d"
+ ;;
+ arm64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-arm64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="e23385e5c640787fa02cd58f2301ea09e162c4d99f8ca9fa6d52766f428a933d"
+ ;;
+ riscv64)
+ GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-riscv64.tar.gz"
+ GOLANG_ARCHIVE_CHECKSUM="c8f94d1de6024546194d58e7b9370dc7ea06176aad94a675b0062c25c40cb645"
+ ;;
+ *)
+ GOLANG_FAILURE_REASON="FreeBSD $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself."
+ return 1
+ ;;
+ esac
+ ;;
+ *)
+ GOLANG_FAILURE_REASON="We do not support automatic handling of a Go toolchain on this system, you must install one manually."
+ return 1
+ ;;
+ esac
+
+ if [ -d '/usr/local/go' ]; then
+ if [ -f '/usr/local/go/.installed-by-netdata' ]; then
+ rm -rf /usr/local/go
+ else
+ GOLANG_FAILURE_REASON="Refusing to overwrite existing Go toolchain install at /usr/local/go, it needs to be updated manually."
+ return 1
+ fi
+ fi
+
+ mkdir -p "${GOLANG_TEMP_PATH}"
+
+ if ! curl --fail -q -sSL --connect-timeout 10 --retry 3 --output "${GOLANG_ARCHIVE_NAME}" "${GOLANG_ARCHIVE_URL}"; then
+ GOLANG_FAILURE_REASON="Failed to download Go toolchain."
+ return 1
+ fi
+
+ echo "${GOLANG_ARCHIVE_CHECKSUM} ${GOLANG_ARCHIVE_NAME}" > "${GOLANG_CHECKSUM_FILE}"
+
+ if ! sha256sum -c "${GOLANG_CHECKSUM_FILE}"; then
+ GOLANG_FAILURE_REASON="Invalid checksum for downloaded Go toolchain."
+ return 1
+ fi
+
+ if ! tar -C /usr/local/ -xzf "${GOLANG_ARCHIVE_NAME}"; then
+ GOLANG_FAILURE_REASON="Failed to extract Go toolchain."
+ return 1
+ fi
+
+ touch /usr/local/go/.installed-by-netdata
+
+ rm -rf "${GOLANG_TEMP_PATH}"
+}
+
+ensure_go_toolchain() {
+ go="$(PATH="/usr/local/go/bin:${PATH}" command -v go 2>/dev/null)"
+
+ need_go_install=0
+
+ if [ -z "${go}" ]; then
+ need_go_install=1
+ elif ! check_go_version; then
+ need_go_install=1
+ fi
+
+ if [ "${need_go_install}" -eq 1 ]; then
+ if ! install_go_toolchain; then
+ return 1
+ fi
+
+ rm -rf "${GOLANG_TEMP_PATH}" || true
+ fi
+
+ return 0
+}
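
As the header comment in check-for-go-toolchain.sh describes, callers are expected to source the
file and then call ensure_go_toolchain, consulting GOLANG_FAILURE_REASON on failure. A minimal
sketch of that calling pattern; the error handling shown here is illustrative:

    #!/bin/sh
    . ./packaging/check-for-go-toolchain.sh

    if ! ensure_go_toolchain; then
        echo "Go toolchain unavailable: ${GOLANG_FAILURE_REASON}" >&2
        exit 1
    fi

    # A Go >= 1.22.0 toolchain is now either already on the PATH or under /usr/local/go.
    PATH="/usr/local/go/bin:${PATH}" go version
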
diff --git a/packaging/cmake/Modules/FindGo.cmake b/packaging/cmake/Modules/FindGo.cmake
new file mode 100644
index 000000000..69e23fda6
--- /dev/null
+++ b/packaging/cmake/Modules/FindGo.cmake
@@ -0,0 +1,68 @@
+# Custom CMake module to find the Go toolchain
+#
+# Copyright (c) 2024 Netdata Inc
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This is a relatively orthodox CMake Find Module. It can be used by
+# simply including it and then invoking `find_package(Go)`.
+#
+# Version handling is done by CMake itself via the
+# find_package_handle_standard_args() function, so `find_package(Go 1.21)`
+# will also work correctly.
+
+if(GO_FOUND)
+ return()
+endif()
+
+# The complexity below is needed to account for the complex rules we use for finding the Go install.
+#
+# If GOROOT is set, we honor that. Otherwise, we check known third-party install paths for the platform in question
+# and fall back to looking in PATH. For the specific case of MSYS2, we prefer a Windows install over an MSYS2 install.
+if(DEFINED $ENV{GOROOT})
+ find_program(GO_EXECUTABLE go PATHS "$ENV{GOROOT}/bin" DOC "Go toolchain" NO_DEFAULT_PATH)
+ set(GO_ROOT $ENV{GOROOT})
+elseif(OS_WINDOWS)
+ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ find_program(GO_EXECUTABLE go PATHS C:/go/bin "C:/Program Files/go/bin" DOC "Go toolchain" NO_DEFAULT_PATH)
+ else()
+ find_program(GO_EXECUTABLE go PATHS /c/go/bin "/c/Program Files/go/bin" /mingw64/lib/go/bin /ucrt64/lib/go/bin /clang64/lib/go/bin DOC "Go toolchain" NO_DEFAULT_PATH)
+ endif()
+else()
+ find_program(GO_EXECUTABLE go PATHS /usr/local/go/bin DOC "Go toolchain" NO_DEFAULT_PATH)
+endif()
+find_program(GO_EXECUTABLE go DOC "Go toolchain")
+
+if (GO_EXECUTABLE)
+ execute_process(
+ COMMAND ${GO_EXECUTABLE} version
+ OUTPUT_VARIABLE GO_VERSION_STRING
+ RESULT_VARIABLE RESULT
+ )
+ if (RESULT EQUAL 0)
+ string(REGEX MATCH "go([0-9]+\\.[0-9]+(\\.[0-9]+)?)" GO_VERSION_STRING "${GO_VERSION_STRING}")
+ string(REGEX MATCH "([0-9]+\\.[0-9]+(\\.[0-9]+)?)" GO_VERSION_STRING "${GO_VERSION_STRING}")
+ else()
+ unset(GO_VERSION_STRING)
+ endif()
+
+ if(NOT DEFINED GO_ROOT)
+ execute_process(
+ COMMAND ${GO_EXECUTABLE} env GOROOT
+ OUTPUT_VARIABLE GO_ROOT
+ RESULT_VARIABLE RESULT
+ )
+ if(RESULT EQUAL 0)
+ string(REGEX REPLACE "\n$" "" GO_ROOT "${GO_ROOT}")
+ else()
+ unset(GO_ROOT)
+ endif()
+ endif()
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(
+ Go
+ REQUIRED_VARS GO_EXECUTABLE GO_ROOT
+ VERSION_VAR GO_VERSION_STRING
+)
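
FindGo.cmake honors a GOROOT environment variable before checking the platform-specific install
locations and PATH, so a particular Go toolchain can be selected at configure time. An illustrative
configure invocation; the toolchain path and build directory name are assumptions:

    GOROOT=/opt/go-1.22.0 cmake -S . -B build
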
diff --git a/packaging/cmake/Modules/NetdataCompilerFlags.cmake b/packaging/cmake/Modules/NetdataCompilerFlags.cmake
new file mode 100644
index 000000000..28b43b4ec
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataCompilerFlags.cmake
@@ -0,0 +1,102 @@
+# Functions to simplify handling of extra compiler flags.
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+include(CheckCCompilerFlag)
+include(CheckCXXCompilerFlag)
+
+# Construct a pre-processor safe name
+#
+# This takes a specified value, and assigns the generated name to the
+# specified target.
+function(make_cpp_safe_name value target)
+ string(REPLACE "-" "_" tmp "${value}")
+ string(REPLACE "=" "_" tmp "${tmp}")
+ set(${target} "${tmp}" PARENT_SCOPE)
+endfunction()
+
+# Conditionally add an extra compiler flag to C and C++ flags.
+#
+# If the language flags already match the `match` argument, skip this flag.
+# Otherwise, check for support for `flag` and if support is found, add it to
+# the compiler flags for the run.
+function(add_simple_extra_compiler_flag match flag)
+ set(CMAKE_REQUIRED_FLAGS "-Werror")
+
+ make_cpp_safe_name("${flag}" flag_name)
+
+ if(NOT ${CMAKE_C_FLAGS} MATCHES ${match})
+ check_c_compiler_flag("${flag}" HAVE_C_${flag_name})
+ endif()
+
+ if(NOT ${CMAKE_CXX_FLAGS} MATCHES ${match})
+ check_cxx_compiler_flag("${flag}" HAVE_CXX_${flag_name})
+ endif()
+
+ if(HAVE_C_${flag_name} AND HAVE_CXX_${flag_name})
+ add_compile_options("${flag}")
+ add_link_options("${flag}")
+ endif()
+endfunction()
+
+# Same as add_simple_extra_compiler_flag, but check for a second flag if the
+# first one is unsupported.
+function(add_double_extra_compiler_flag match flag1 flag2)
+ set(CMAKE_REQUIRED_FLAGS "-Werror")
+
+ make_cpp_safe_name("${flag1}" flag1_name)
+ make_cpp_safe_name("${flag2}" flag2_name)
+
+ if(NOT ${CMAKE_C_FLAGS} MATCHES ${match})
+ check_c_compiler_flag("${flag1}" HAVE_C_${flag1_name})
+ if(NOT HAVE_C_${flag1_name})
+ check_c_compiler_flag("${flag2}" HAVE_C_${flag2_name})
+ endif()
+ endif()
+
+ if(NOT ${CMAKE_CXX_FLAGS} MATCHES ${match})
+ check_cxx_compiler_flag("${flag1}" HAVE_CXX_${flag1_name})
+ if(NOT HAVE_CXX_${flag1_name})
+ check_cxx_compiler_flag("${flag2}" HAVE_CXX_${flag2_name})
+ endif()
+ endif()
+
+ if(HAVE_C_${flag1_name} AND HAVE_CXX_${flag1_name})
+ add_compile_options("${flag1}")
+ add_link_options("${flag1}")
+ elseif(HAVE_C_${flag2_name} AND HAVE_CXX${flag2_name})
+ add_compile_options("${flag2}")
+ add_link_options("${flag2}")
+ endif()
+endfunction()
+
+if(CMAKE_BUILD_TYPE STREQUAL "Debug")
+ option(DISABLE_HARDENING "Disable adding extra compiler flags for hardening" TRUE)
+else()
+ option(DISABLE_HARDENING "Disable adding extra compiler flags for hardening" FALSE)
+endif()
+
+option(ENABLE_ADDRESS_SANITIZER "Build with address sanitizer enabled" False)
+mark_as_advanced(ENABLE_ADDRESS_SANITIZER)
+
+if(ENABLE_ADDRESS_SANITIZER)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
+endif()
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C_FLAGS}")
+
+if(NOT ${DISABLE_HARDENING})
+ add_double_extra_compiler_flag("stack-protector" "-fstack-protector-strong" "-fstack-protector")
+ add_double_extra_compiler_flag("_FORTIFY_SOURCE" "-D_FORTIFY_SOURCE=3" "-D_FORTIFY_SOURCE=2")
+ add_simple_extra_compiler_flag("stack-clash-protection" "-fstack-clash-protection")
+ add_simple_extra_compiler_flag("-fcf-protection" "-fcf-protection=full")
+ add_simple_extra_compiler_flag("branch-protection" "-mbranch-protection=standard")
+endif()
+
+foreach(FLAG function-sections data-sections)
+ add_simple_extra_compiler_flag("${FLAG}" "-f${FLAG}")
+endforeach()
+
+add_simple_extra_compiler_flag("-Wbuiltin-macro-redefined" "-Wno-builtin-macro-redefined")
+add_simple_extra_compiler_flag("-fexecptions" "-fexceptions")
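
The hardening behavior above is governed by two cache options: DISABLE_HARDENING, which defaults to
TRUE only for Debug builds, and the advanced ENABLE_ADDRESS_SANITIZER switch. Hedged examples of
configure invocations that exercise them; the build directory names are assumptions:

    # Opt out of the extra hardening flags for a release-style build.
    cmake -S . -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo -DDISABLE_HARDENING=On

    # Instrument a separate build tree with AddressSanitizer.
    cmake -S . -B build-asan -DENABLE_ADDRESS_SANITIZER=On
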
diff --git a/packaging/cmake/Modules/NetdataDetectSystemd.cmake b/packaging/cmake/Modules/NetdataDetectSystemd.cmake
new file mode 100644
index 000000000..ecac7aaf8
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataDetectSystemd.cmake
@@ -0,0 +1,42 @@
+# CMake Module to handle all the systemd-related checks for Netdata.
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+macro(detect_systemd)
+ find_library(SYSTEMD_LIBRARY NAMES systemd)
+
+ set(ENABLE_DSYSTEMD_DBUS NO)
+ pkg_check_modules(SYSTEMD libsystemd)
+
+ if(SYSTEMD_FOUND)
+ set(CMAKE_REQUIRED_LIBRARIES_BEFORE_SYSTEMD "${CMAKE_REQUIRED_LIBRARIES}")
+ set(CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES};${SYSTEMD_LIBRARIES}")
+
+ check_c_source_compiles("
+ #include <systemd/sd-journal.h>
+
+ int main() {
+ int x = SD_JOURNAL_OS_ROOT;
+ return 0;
+ }" HAVE_SD_JOURNAL_OS_ROOT)
+
+ check_symbol_exists(SD_JOURNAL_OS_ROOT "systemd/sd-journal.h" HAVE_SD_JOURNAL_OS_ROOT)
+ check_symbol_exists(sd_journal_open_files_fd "systemd/sd-journal.h" HAVE_SD_JOURNAL_OPEN_FILES_FD)
+ check_symbol_exists(sd_journal_restart_fields "systemd/sd-journal.h" HAVE_SD_JOURNAL_RESTART_FIELDS)
+ check_symbol_exists(sd_journal_get_seqnum "systemd/sd-journal.h" HAVE_SD_JOURNAL_GET_SEQNUM)
+
+ check_symbol_exists(sd_bus_default_system "systemd/sd-bus.h" HAVE_SD_BUS_DEFAULT_SYSTEM)
+ check_symbol_exists(sd_bus_call_method "systemd/sd-bus.h" HAVE_SD_BUS_CALL_METHOD)
+ check_symbol_exists(sd_bus_message_enter_container "systemd/sd-bus.h" HAVE_SD_BUS_MESSAGE_ENTER_CONTAINER)
+ check_symbol_exists(sd_bus_message_read "systemd/sd-bus.h" HAVE_SD_BUS_MESSAGE_READ)
+ check_symbol_exists(sd_bus_message_exit_container "systemd/sd-bus.h" HAVE_SD_BUS_MESSAGE_EXIT_CONTAINER)
+
+ set(CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES_BEFORE_SYSTEMD}")
+
+ set(HAVE_SYSTEMD True)
+ if(HAVE_SD_BUS_DEFAULT_SYSTEM AND HAVE_SD_BUS_CALL_METHOD AND HAVE_SD_BUS_MESSAGE_ENTER_CONTAINER AND HAVE_SD_BUS_MESSAGE_READ AND HAVE_SD_BUS_MESSAGE_EXIT_CONTAINER)
+ set(ENABLE_SYSTEMD_DBUS YES)
+ endif()
+ endif()
+endmacro()
diff --git a/packaging/cmake/Modules/NetdataEBPFCORE.cmake b/packaging/cmake/Modules/NetdataEBPFCORE.cmake
new file mode 100644
index 000000000..f4c918bfe
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataEBPFCORE.cmake
@@ -0,0 +1,27 @@
+# Handling for eBPF CO-RE files
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+include(ExternalProject)
+
+set(ebpf-co-re_SOURCE_DIR "${CMAKE_BINARY_DIR}/ebpf-co-re")
+
+# Fetch and install our eBPF CO-RE files
+function(netdata_fetch_ebpf_co_re)
+ ExternalProject_Add(
+ ebpf-co-re
+ URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.5.1/netdata-ebpf-co-re-glibc-v1.4.5.1.tar.xz
+ URL_HASH SHA256=10d49602c873932a4e0a3717a4af2137434b480d0170c2fb000ec70ae02f6e30
+ SOURCE_DIR "${ebpf-co-re_SOURCE_DIR}"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+ EXCLUDE_FROM_ALL 1
+ )
+endfunction()
+
+function(netdata_add_ebpf_co_re_to_target _target)
+ add_dependencies(${_target} ebpf-co-re)
+ target_include_directories(${_target} BEFORE PRIVATE "${ebpf-co-re_SOURCE_DIR}")
+endfunction()
diff --git a/packaging/cmake/Modules/NetdataEBPFLegacy.cmake b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake
new file mode 100644
index 000000000..12dfce486
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake
@@ -0,0 +1,48 @@
+# Handling for eBPF legacy programs
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+include(ExternalProject)
+include(NetdataUtil)
+
+set(ebpf-legacy_SOURCE_DIR "${CMAKE_BINARY_DIR}/ebpf-legacy")
+set(ebpf-legacy_BUILD_DIR "${CMAKE_BINARY_DIR}/ebpf-legacy-build")
+
+# Fetch the legacy eBPF code.
+function(netdata_fetch_legacy_ebpf_code)
+ netdata_identify_libc(_libc)
+
+ if(DEFINED BUILD_SHARED_LIBS)
+ if(NOT BUILD_SHARED_LIBS)
+ set(need_static TRUE)
+ endif()
+ endif()
+
+ if(need_static)
+ set(_hash 1c0c8f1177514e9e21a23c28841406595e57b7cfacd93746ff2d6b25987b94a6)
+ set(_libc "static")
+ elseif(_libc STREQUAL "glibc")
+ set(_hash e365a76a2bb25190f1d91e4dea2cfc5ff5db63b5238fbfbc89f72755cf85a12c)
+ elseif(_libc STREQUAL "musl")
+ set(_hash ec14dcdfa29d4fba1cea6763740b9d37683515bde88a1a29b6e7c70ce01a604d)
+ else()
+ message(FATAL_ERROR "Could not determine libc implementation, unable to install eBPF legacy code.")
+ endif()
+
+ ExternalProject_Add(
+ ebpf-code-legacy
+ URL https://github.com/netdata/kernel-collector/releases/download/v1.4.5.1/netdata-kernel-collector-${_libc}-v1.4.5.1.tar.xz
+ URL_HASH SHA256=${_hash}
+ SOURCE_DIR "${ebpf-legacy_SOURCE_DIR}"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND sh -c "mkdir -p ${ebpf-legacy_BUILD_DIR}/ebpf.d && mv ${ebpf-legacy_SOURCE_DIR}/*netdata_ebpf_*.o ${ebpf-legacy_BUILD_DIR}/ebpf.d"
+ INSTALL_COMMAND ""
+ )
+endfunction()
+
+function(netdata_install_legacy_ebpf_code)
+ install(DIRECTORY ${ebpf-legacy_BUILD_DIR}/ebpf.d
+ DESTINATION usr/libexec/netdata/plugins.d
+ COMPONENT ebpf-code-legacy)
+endfunction()
diff --git a/packaging/cmake/Modules/NetdataFetchContentExtra.cmake b/packaging/cmake/Modules/NetdataFetchContentExtra.cmake
new file mode 100644
index 000000000..e82fe413b
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataFetchContentExtra.cmake
@@ -0,0 +1,54 @@
+# Extra tools for working with FetchContent on older CMake
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# FetchContent_MakeAvailable_NoInstall
+#
+# Add a sub-project with FetchContent, but with the EXCLUDE_FROM_ALL
+# argument for the add_subdirectory part.
+#
+# CMake 3.28 and newer provide a way to do this with an extra argument
+# on FetchContent_Declare, but older versions need you to implement
+# the logic yourself. Once we no longer support CMake versions older
+# than 3.28, we can get rid of this macro.
+#
+# Unlike FetchContent_MakeAvailable, this only accepts a single project
+# to make available.
+macro(FetchContent_MakeAvailable_NoInstall name)
+ include(FetchContent)
+
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.28)
+ FetchContent_MakeAvailable(${name})
+ else()
+ FetchContent_GetProperties(${name})
+
+ if(NOT ${name}_POPULATED)
+ FetchContent_Populate(${name})
+ add_subdirectory(${${name}_SOURCE_DIR} ${${name}_BINARY_DIR} EXCLUDE_FROM_ALL)
+ endif()
+ endif()
+endmacro()
+
+# NETDATA_PROPAGATE_TOOLCHAIN_ARGS
+#
+# Defines a set of CMake flags to be passed to CMAKE_ARGS for
+# FetchContent_Declare and ExternalProject_Add to ensure that toolchain
+# configuration propagates correctly to sub-projects.
+#
+# This needs to be explicitly included for any sub-project that needs
+# to be built for the target system.
+#
+# This also needs to _NOT_ have any generator expressions, as they are not
+# supported for the required usage of this variable in CMake 3.30 or newer.
+set(NETDATA_PROPAGATE_TOOLCHAIN_ARGS
+ "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
+
+if(DEFINED CMAKE_C_COMPILER_TARGET)
+ set(NETDATA_PROPAGATE_TOOLCHAIN_ARGS "${NETDATA_PROPAGATE_TOOLCHAIN_ARGS} -DCMAKE_C_COMPILER_TARGET=${CMAKE_C_COMPILER_TARGET}")
+endif()
+
+if(DEFINED CMAKE_CXX_COMPILER_TARGET)
+ set(NETDATA_PROPAGATE_TOOLCHAIN_ARGS "${NETDATA_PROPAGATE_TOOLCHAIN_ARGS} -DCMAKE_CXX_COMPILER_TARGET=${CMAKE_CXX_COMPILER_TARGET}")
+endif()
diff --git a/packaging/cmake/Modules/NetdataGoTools.cmake b/packaging/cmake/Modules/NetdataGoTools.cmake
new file mode 100644
index 000000000..c8b8b9c01
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataGoTools.cmake
@@ -0,0 +1,85 @@
+# Macros and functions to assist in working with Go
+#
+# Copyright (c) 2024 Netdata Inc
+#
+# SPDX-License-Identifier: GPL
+
+if(CMAKE_BUILD_TYPE STREQUAL Debug)
+ set(GO_LDFLAGS "-X github.com/netdata/netdata/go/plugins/pkg/buildinfo.Version=${NETDATA_VERSION_STRING}")
+else()
+ set(GO_LDFLAGS "-w -s -X github.com/netdata/netdata/go/plugins/pkg/buildinfo.Version=${NETDATA_VERSION_STRING}")
+endif()
+
+# add_go_target: Add a new target that needs to be built using the Go toolchain.
+#
+# Takes four arguments, the target name, the output artifact name, the
+# source tree for the Go module, and the sub-directory of that source tree
+# to pass to `go build`.
+#
+# The target itself will invoke `go build` in the specified source tree,
+# using the `-o` option to produce the final output artifact, and passing
+# the requested sub-directory as the final argument.
+#
+# This will also automatically construct the dependency list for the
+# target by finding all Go source files under the specified source tree
+# and then appending the go.mod and go.sum files from the root of the
+# source tree.
+macro(add_go_target target output build_src build_dir)
+ file(GLOB_RECURSE ${target}_DEPS CONFIGURE_DEPENDS "${build_src}/*.go")
+ list(APPEND ${target}_DEPS
+ "${build_src}/go.mod"
+ "${build_src}/go.sum"
+ )
+
+ add_custom_command(
+ OUTPUT ${output}
+ COMMAND "${CMAKE_COMMAND}" -E env GOROOT=${GO_ROOT} CGO_ENABLED=0 GOPROXY=https://proxy.golang.org,direct "${GO_EXECUTABLE}" build -buildvcs=false -ldflags "${GO_LDFLAGS}" -o "${CMAKE_BINARY_DIR}/${output}" "./${build_dir}"
+ DEPENDS ${${target}_DEPS}
+ COMMENT "Building Go component ${output}"
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/${build_src}"
+ VERBATIM
+ )
+ add_custom_target(
+ ${target} ALL
+ DEPENDS ${output}
+ )
+endmacro()
+
+# find_min_go_version: Determine the minimum Go version based on go.mod files
+#
+# Takes one argument, specifying a source tree to scan for go.mod files.
+#
+# All files found will be checked for a `go` directive, and the
+# MIN_GO_VERSION variable will be set to the highest version
+# number found among these directives.
+#
+# Only works on UNIX-like systems, because it has to process the go.mod
+# files in ways that CMake can't do on its own.
+function(find_min_go_version src_tree)
+ message(STATUS "Determining minimum required version of Go for this build")
+
+ file(GLOB_RECURSE go_mod_files ${src_tree}/go.mod)
+
+ set(result 1.0)
+
+ foreach(f IN ITEMS ${go_mod_files})
+ message(VERBOSE "Checking Go version specified in ${f}")
+ execute_process(
+ COMMAND grep -E "^go .*$" ${f}
+ COMMAND cut -f 2 -d " "
+ RESULT_VARIABLE version_check_result
+ OUTPUT_VARIABLE go_mod_version
+ )
+
+ if(version_check_result EQUAL 0)
+ string(REGEX MATCH "([0-9]+\\.[0-9]+(\\.[0-9]+)?)" go_mod_version "${go_mod_version}")
+
+ if(go_mod_version VERSION_GREATER result)
+ set(result "${go_mod_version}")
+ endif()
+ endif()
+ endforeach()
+
+ message(STATUS "Minimum required Go version determined to be ${result}")
+ set(MIN_GO_VERSION "${result}" PARENT_SCOPE)
+endfunction()
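
The custom command produced by add_go_target reduces to a plain go build run from the Go module's
source tree. Roughly equivalent shell is sketched below; the module path, output name, version
string, and build directory are illustrative assumptions rather than values from the repository:

    cd src/go &&
        GOROOT=/usr/local/go CGO_ENABLED=0 GOPROXY=https://proxy.golang.org,direct \
        go build -buildvcs=false \
            -ldflags "-w -s -X github.com/netdata/netdata/go/plugins/pkg/buildinfo.Version=v1.99.0" \
            -o /path/to/build/go.d.plugin ./cmd/godplugin
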
diff --git a/packaging/cmake/Modules/NetdataJSONC.cmake b/packaging/cmake/Modules/NetdataJSONC.cmake
new file mode 100644
index 000000000..89ec70265
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataJSONC.cmake
@@ -0,0 +1,120 @@
+# Functions and macros for handling of JSON-C
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Handle bundling of json-c.
+#
+# This pulls it in as a sub-project using FetchContent functionality.
+#
+# This needs to be a function and not a macro for variable scoping
+# reasons. All the things we care about from the sub-project are exposed
+# as targets, which are globally scoped and not function scoped.
+function(netdata_bundle_jsonc)
+ include(FetchContent)
+ include(NetdataFetchContentExtra)
+
+ message(STATUS "Preparing vendored copy of JSON-C")
+
+ if(ENABLE_BUNDLED_JSONC)
+ set(FETCHCONTENT_TRY_FIND_PACKAGE_MODE NEVER)
+ endif()
+
+ set(FETCHCONTENT_FULLY_DISCONNECTED Off)
+
+ # JSON-C supports older versions of CMake than we do, so set
+ # the correct values for the few policies we actually need.
+ set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+
+ # JSON-C's build system does string comparisons against option
+ # values instead of treating them as booleans, so we need to use
+ # proper strings for option values instead of just setting them
+ # to true or false.
+ set(DISABLE_BSYMBOLIC ON)
+ set(DISABLE_WERROR ON)
+ set(DISABLE_EXTRA_LIBS ON)
+ set(BUILD_SHARED_LIBS OFF)
+ set(BUILD_STATIC_LIBS ON)
+ set(BUILD_APPS OFF)
+
+ set(repo https://github.com/json-c/json-c)
+ set(tag b4c371fa0cbc4dcbaccc359ce9e957a22988fb34) # json-c-0.17-20230812
+
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.28)
+ FetchContent_Declare(json-c
+ GIT_REPOSITORY ${repo}
+ GIT_TAG ${tag}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ EXCLUDE_FROM_ALL
+ )
+ else()
+ FetchContent_Declare(json-c
+ GIT_REPOSITORY ${repo}
+ GIT_TAG ${tag}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ )
+ endif()
+
+ FetchContent_MakeAvailable_NoInstall(json-c)
+
+ message(STATUS "Finished preparing vendored copy of JSON-C")
+endfunction()
+
+# Handle setup of json-c for the build.
+#
+# This will attempt to find json-c using pkg_check_modules. If it finds
+# a usable copy, that will be used. If not, it will bundle a vendored copy
+# as a sub-project.
+#
+# Irrespective of how json-c is to be included, library names,
+# include directories, and compile definitions will be specified in the
+# NETDATA_JSONC_* variables for later use.
+macro(netdata_detect_jsonc)
+ if(NOT ENABLE_BUNDLED_JSONC)
+ pkg_check_modules(JSONC json-c)
+ endif()
+
+ if(NOT JSONC_FOUND)
+ set(ENABLE_BUNDLED_JSONC True)
+ netdata_bundle_jsonc()
+ set(NETDATA_JSONC_LDFLAGS json-c)
+ set(NETDATA_JSONC_INCLUDE_DIRS ${PROJECT_BINARY_DIR}/include)
+ get_target_property(NETDATA_JSONC_CFLAGS_OTHER json-c INTERFACE_COMPILE_DEFINITIONS)
+
+ if(NETDATA_JSONC_CFLAGS_OTHER STREQUAL NETDATA_JSONC_CFLAGS_OTHER-NOTFOUND)
+ set(NETDATA_JSONC_CFLAGS_OTHER "")
+ endif()
+
+ add_custom_command(
+ OUTPUT ${PROJECT_BINARY_DIR}/include/json-c
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/include
+ COMMAND ${CMAKE_COMMAND} -E create_symlink ${json-c_BINARY_DIR} ${PROJECT_BINARY_DIR}/include/json-c
+ COMMENT "Create compatibility symlink for vendored JSON-C headers"
+ DEPENDS json-c
+ )
+ add_custom_target(
+ json-c-compat-link
+ DEPENDS ${PROJECT_BINARY_DIR}/include/json-c
+ )
+ else()
+ set(NETDATA_JSONC_LDFLAGS ${JSONC_LDFLAGS})
+ set(NETDATA_JSONC_CFLAGS_OTHER ${JSONC_CFLAGS_OTHER})
+ set(NETDATA_JSONC_INCLUDE_DIRS ${JSONC_INCLUDE_DIRS})
+ add_custom_target(json-c-compat-link)
+ endif()
+endmacro()
+
+# Add json-c as a public link dependency of the specified target.
+#
+# The specified target must already exist, and the netdata_detect_jsonc
+# macro must have already been run at least once for this to work correctly.
+function(netdata_add_jsonc_to_target _target)
+ if(ENABLE_BUNDLED_JSONC)
+ target_include_directories(${_target} BEFORE PUBLIC ${NETDATA_JSONC_INCLUDE_DIRS})
+ else()
+ target_include_directories(${_target} PUBLIC ${NETDATA_JSONC_INCLUDE_DIRS})
+ endif()
+ target_compile_options(${_target} PUBLIC ${NETDATA_JSONC_CFLAGS_OTHER})
+ target_link_libraries(${_target} PUBLIC ${NETDATA_JSONC_LDFLAGS})
+ add_dependencies(${_target} json-c-compat-link)
+endfunction()
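
Whether the system copy of json-c or the vendored copy is used is decided by netdata_detect_jsonc
at configure time, and build-package.sh above pins the native packages to the system copy with
ENABLE_BUNDLED_JSONC=Off. Forcing the vendored copy from the command line would look roughly like
this; the build directory name is an assumption:

    cmake -S . -B build -DENABLE_BUNDLED_JSONC=On
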
diff --git a/packaging/cmake/Modules/NetdataLibBPF.cmake b/packaging/cmake/Modules/NetdataLibBPF.cmake
new file mode 100644
index 000000000..9c3bf6d2f
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataLibBPF.cmake
@@ -0,0 +1,102 @@
+# Handling for libbpf (used by the eBPF plugin)
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+include(ExternalProject)
+include(NetdataUtil)
+
+set(libbpf_SOURCE_DIR "${CMAKE_BINARY_DIR}/libbpf")
+
+# Check if the kernel is old enough that we need to use a legacy copy of eBPF.
+function(_need_legacy_libbpf _var)
+ if(FORCE_LEGACY_LIBBPF)
+ set(${_var} TRUE PARENT_SCOPE)
+ return()
+ endif()
+
+ netdata_detect_host_kernel_version()
+
+ if(HOST_KERNEL_VERSION VERSION_LESS "4.14.0")
+ set(${_var} TRUE PARENT_SCOPE)
+ else()
+ set(${_var} FALSE PARENT_SCOPE)
+ endif()
+endfunction()
+
+# Prepare a vendored copy of libbpf
+function(netdata_bundle_libbpf)
+ _need_legacy_libbpf(USE_LEGACY_LIBBPF)
+
+ if(USE_LEGACY_LIBBPF)
+ set(_libbpf_tag 673424c56127bb556e64095f41fd60c26f9083ec) # v0.0.9_netdata-1
+ else()
+ set(_libbpf_tag 6923eb970e22682eaedff79f5be4f9934b99cf50) # v1.4.5p_netdata
+ endif()
+
+ if(DEFINED BUILD_SHARED_LIBS)
+ if(NOT BUILD_SHARED_LIBS)
+ set(need_static TRUE)
+ endif()
+ endif()
+
+ if(NOT need_static)
+ netdata_identify_libc(_libc)
+
+ string(REGEX MATCH "glibc|musl" _libc_supported "${_libc}")
+
+ if(NOT _libc_supported)
+      message(FATAL_ERROR "This system’s libc (detected: ${_libc}) is not supported by the eBPF plugin.")
+ endif()
+ endif()
+
+ find_program(MAKE_COMMAND make)
+
+ if(MAKE_COMMAND STREQUAL MAKE_COMMAND-NOTFOUND)
+ message(FATAL_ERROR "GNU Make is required when building the eBPF plugin, but could not be found.")
+ endif()
+
+ pkg_check_modules(ELF REQUIRED libelf)
+ pkg_check_modules(ZLIB REQUIRED zlib)
+
+ set(_libbpf_lib_dir lib)
+
+ if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ set(_libbpf_lib_dir lib64)
+ endif()
+
+ set(_libbpf_library "${libbpf_SOURCE_DIR}/usr/${_libbpf_lib_dir}/libbpf.a")
+
+ ExternalProject_Add(
+ libbpf
+ GIT_REPOSITORY https://github.com/netdata/libbpf.git
+ GIT_TAG ${_libbpf_tag}
+ SOURCE_DIR "${libbpf_SOURCE_DIR}"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ${MAKE_COMMAND} -C src CC=${CMAKE_C_COMPILER} BUILD_STATIC_ONLY=1 OBJDIR=build/ DESTDIR=../ install
+ BUILD_IN_SOURCE 1
+ BUILD_BYPRODUCTS "${_libbpf_library}"
+ INSTALL_COMMAND ""
+ EXCLUDE_FROM_ALL 1
+ )
+
+ add_library(libbpf_library STATIC IMPORTED GLOBAL)
+ set_property(
+ TARGET libbpf_library
+ PROPERTY IMPORTED_LOCATION "${_libbpf_library}"
+ )
+ set_property(
+ TARGET libbpf_library
+ PROPERTY INTERFACE_LINK_LIBRARIES "${ELF_LIBRARIES};${ZLIB_LIBRARIES}"
+ )
+ set(NETDATA_LIBBPF_INCLUDE_DIRECTORIES "${libbpf_SOURCE_DIR}/usr/include;${libbpf_SOURCE_DIR}/include;${ELF_INCLUDE_DIRECTORIES};${ZLIB_INCLUDE_DIRECTORIES}" PARENT_SCOPE)
+ set(NETDATA_LIBBPF_COMPILE_OPTIONS "${ELF_CFLAGS_OTHER};${ZLIB_CFLAGS_OTHER}" PARENT_SCOPE)
+endfunction()
+
+# Add libbpf as a link dependency for the given target.
+function(netdata_add_libbpf_to_target _target)
+ target_link_libraries(${_target} PUBLIC libbpf_library)
+ target_include_directories(${_target} BEFORE PUBLIC "${NETDATA_LIBBPF_INCLUDE_DIRECTORIES}")
+ target_compile_options(${_target} PUBLIC "${NETDATA_LIBBPF_COMPILE_OPTIONS}")
+ add_dependencies(${_target} libbpf)
+endfunction()
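
The libbpf variant is normally chosen from the host kernel version, with kernels older than 4.14.0
falling back to the legacy v0.0.9 branch, and FORCE_LEGACY_LIBBPF overrides that check. An
illustrative configure invocation for a build that wants the eBPF plugin with the legacy libbpf;
the build directory name is an assumption:

    cmake -S . -B build -DENABLE_PLUGIN_EBPF=On -DFORCE_LEGACY_LIBBPF=On
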
diff --git a/packaging/cmake/Modules/NetdataProtobuf.cmake b/packaging/cmake/Modules/NetdataProtobuf.cmake
new file mode 100644
index 000000000..62448440e
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataProtobuf.cmake
@@ -0,0 +1,199 @@
+# Macros and functions for handling of Protobuf
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Prepare a vendored copy of Protobuf for use with Netdata.
+function(netdata_bundle_protobuf)
+ include(FetchContent)
+ include(NetdataFetchContentExtra)
+
+ set(PROTOBUF_TAG f0dc78d7e6e331b8c6bb2d5283e06aa26883ca7c) # v21.12
+ set(NEED_ABSL False)
+
+ if(CMAKE_CXX_STANDARD GREATER_EQUAL 14)
+ set(PROTOBUF_TAG 4a2aef570deb2bfb8927426558701e8bfc26f2a4) # v25.3
+ set(NEED_ABSL True)
+ set(ABSL_TAG 2f9e432cce407ce0ae50676696666f33a77d42ac) # 20240116.1
+ endif()
+
+ set(FETCHCONTENT_TRY_FIND_PACKAGE_MODE NEVER)
+
+ string(REPLACE "-fsanitize=address" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+ string(REPLACE "-fsanitize=address" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+
+ # ignore debhelper
+ set(FETCHCONTENT_FULLY_DISCONNECTED Off)
+
+ if(NEED_ABSL)
+ set(ABSL_PROPAGATE_CXX_STD On)
+ set(ABSL_ENABLE_INSTALL Off)
+ set(BUILD_SHARED_LIBS Off)
+ set(absl_repo https://github.com/abseil/abseil-cpp)
+
+ message(STATUS "Preparing bundled Abseil (required by bundled Protobuf)")
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.28)
+ FetchContent_Declare(absl
+ GIT_REPOSITORY ${absl_repo}
+ GIT_TAG ${ABSL_TAG}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ EXCLUDE_FROM_ALL
+ )
+ else()
+ FetchContent_Declare(absl
+ GIT_REPOSITORY ${absl_repo}
+ GIT_TAG ${ABSL_TAG}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ )
+ endif()
+ FetchContent_MakeAvailable_NoInstall(absl)
+ message(STATUS "Finished preparing bundled Abseil")
+ endif()
+
+ set(protobuf_INSTALL Off)
+ set(protobuf_BUILD_LIBPROTOC Off)
+ set(protobuf_BUILD_TESTS Off)
+ set(protobuf_BUILD_SHARED_LIBS Off)
+ set(protobuf_repo https://github.com/protocolbuffers/protobuf)
+
+ message(STATUS "Preparing bundled Protobuf")
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.28)
+ FetchContent_Declare(protobuf
+ GIT_REPOSITORY ${protobuf_repo}
+ GIT_TAG ${PROTOBUF_TAG}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ EXCLUDE_FROM_ALL
+ )
+ else()
+ FetchContent_Declare(protobuf
+ GIT_REPOSITORY ${protobuf_repo}
+ GIT_TAG ${PROTOBUF_TAG}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ )
+ endif()
+ FetchContent_MakeAvailable_NoInstall(protobuf)
+ message(STATUS "Finished preparing bundled Protobuf.")
+
+ set(ENABLE_BUNDLED_PROTOBUF True PARENT_SCOPE)
+endfunction()
+
+# Handle detection of Protobuf
+macro(netdata_detect_protobuf)
+ if(OS_WINDOWS)
+ set(PROTOBUF_PROTOC_EXECUTABLE "$ENV{PROTOBUF_PROTOC_EXECUTABLE}")
+ if(NOT PROTOBUF_PROTOC_EXECUTABLE)
+ set(PROTOBUF_PROTOC_EXECUTABLE "/bin/protoc")
+ endif()
+ set(PROTOBUF_CFLAGS_OTHER "")
+ set(PROTOBUF_INCLUDE_DIRS "")
+ set(PROTOBUF_LIBRARIES "-lprotobuf")
+
+ set(ENABLE_PROTOBUF True)
+ set(HAVE_PROTOBUF True)
+ else()
+ if(NOT ENABLE_BUNDLED_PROTOBUF)
+ if (NOT BUILD_SHARED_LIBS)
+ set(Protobuf_USE_STATIC_LIBS On)
+ endif()
+
+ # The FindProtobuf CMake module shipped by upstream CMake is
+ # broken for Protobuf version 22.0 and newer because it does
+ # not correctly pull in the new Abseil dependencies. Protobuf
+ # itself sometimes ships a CMake Package Configuration module
+ # that _does_ work correctly, so use that in preference to the
+ # Find module shipped with CMake.
+ #
+ # The code below works by first attempting to use find_package
+ # in config mode, and then checking for the existence of the
+ # target we actually use that gets defined by the protobuf
+ # CMake Package Configuration Module to determine if that
+ # worked. A bit of extra logic is required in the case of the
+ # config mode working, because some systems ship compatibility
+ # logic for the old FindProtobuf module while others do not.
+ #
+ # Upstream bug reference: https://gitlab.kitware.com/cmake/cmake/-/issues/24321
+ find_package(Protobuf CONFIG)
+
+ if(NOT TARGET protobuf::libprotobuf)
+ message(STATUS "Could not find Protobuf using Config mode, falling back to Module mode")
+ find_package(Protobuf REQUIRED)
+ endif()
+ endif()
+
+ if(TARGET protobuf::libprotobuf)
+ if(NOT Protobuf_PROTOC_EXECUTABLE AND TARGET protobuf::protoc)
+ set(Protobuf_PROTOC_EXECUTABLE protobuf::protoc)
+ endif()
+
+ # It is technically possible that this may still not
+ # be set by this point, so we need to check it and
+ # fail noisily if it isn't because the build won't
+ # work without it.
+ if(NOT Protobuf_PROTOC_EXECUTABLE)
+ message(FATAL_ERROR "Could not determine the location of the protobuf compiler for the detected version of protobuf.")
+ endif()
+
+ set(PROTOBUF_PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})
+ set(PROTOBUF_LIBRARIES protobuf::libprotobuf)
+ endif()
+
+ set(ENABLE_PROTOBUF True)
+ set(HAVE_PROTOBUF True)
+ endif()
+endmacro()
+
+# Helper function to compile protocol definitions into C++ code.
+function(netdata_protoc_generate_cpp INC_DIR OUT_DIR SRCS HDRS)
+ if(NOT ARGN)
+ message(SEND_ERROR "Error: netdata_protoc_generate_cpp() called without any proto files")
+ return()
+ endif()
+
+ set(${INC_DIR})
+ set(${OUT_DIR})
+ set(${SRCS})
+ set(${HDRS})
+
+ foreach(FIL ${ARGN})
+ get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+ get_filename_component(DIR ${ABS_FIL} DIRECTORY)
+ get_filename_component(FIL_WE ${FIL} NAME_WE)
+
+ set(GENERATED_PB_CC "${DIR}/${FIL_WE}.pb.cc")
+ list(APPEND ${SRCS} ${GENERATED_PB_CC})
+
+ set(GENERATED_PB_H "${DIR}/${FIL_WE}.pb.h")
+ list(APPEND ${HDRS} ${GENERATED_PB_H})
+
+ list(APPEND _PROTOC_INCLUDE_DIRS ${INC_DIR})
+
+ if(ENABLE_BUNDLED_PROTOBUF)
+ list(APPEND _PROTOC_INCLUDE_DIRS ${CMAKE_BINARY_DIR}/_deps/protobuf-src/src/)
+ endif()
+
+ add_custom_command(OUTPUT ${GENERATED_PB_CC} ${GENERATED_PB_H}
+ COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
+ ARGS "-I$<JOIN:${_PROTOC_INCLUDE_DIRS},;-I>" --cpp_out=${OUT_DIR} ${ABS_FIL}
+ DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE}
+ COMMENT "Running C++ protocol buffer compiler on ${FIL}"
+ COMMAND_EXPAND_LISTS)
+ endforeach()
+
+ set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
+ set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES COMPILE_OPTIONS -Wno-deprecated-declarations)
+
+ set(${SRCS} ${${SRCS}} PARENT_SCOPE)
+ set(${HDRS} ${${HDRS}} PARENT_SCOPE)
+endfunction()
+
+# Add protobuf to a specified target.
+function(netdata_add_protobuf _target)
+ if(ENABLE_BUNDLED_PROTOBUF)
+ target_include_directories(${_target} BEFORE PRIVATE ${PROTOBUF_INCLUDE_DIRS})
+ else()
+ target_include_directories(${_target} PRIVATE ${PROTOBUF_INCLUDE_DIRS})
+ endif()
+
+ target_compile_options(${_target} PRIVATE ${PROTOBUF_CFLAGS_OTHER})
+ target_link_libraries(${_target} PRIVATE ${PROTOBUF_LIBRARIES})
+endfunction()
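+
+# Illustrative usage sketch (nothing below is called by this module; the
+# target and file names are hypothetical and only show how the helpers
+# above are expected to fit together):
+#
+#   netdata_detect_protobuf()
+#   netdata_protoc_generate_cpp(
+#     "${CMAKE_SOURCE_DIR}/proto"      # include dir passed to protoc via -I
+#     "${CMAKE_SOURCE_DIR}/proto"      # output dir for the generated .pb.cc/.pb.h
+#     MY_PROTO_SRCS MY_PROTO_HDRS      # variables that receive the generated file lists
+#     proto/example.proto)
+#   target_sources(my_target PRIVATE ${MY_PROTO_SRCS} ${MY_PROTO_HDRS})
+#   netdata_add_protobuf(my_target)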
diff --git a/packaging/cmake/Modules/NetdataSentry.cmake b/packaging/cmake/Modules/NetdataSentry.cmake
new file mode 100644
index 000000000..b20aeedd5
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataSentry.cmake
@@ -0,0 +1,30 @@
+# Functions and macros for handling of Sentry
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Handle bundling of Sentry.
+#
+# This pulls it in as a sub-project using FetchContent functionality.
+#
+# This needs to be a function and not a macro for variable scoping
+# reasons. All the things we care about from the sub-project are exposed
+# as targets, which are globally scoped and not function scoped.
+function(netdata_bundle_sentry)
+ include(FetchContent)
+
+ # Ignore the FETCHCONTENT_FULLY_DISCONNECTED=ON that debhelper sets, so the sources can be fetched
+ set(FETCHCONTENT_FULLY_DISCONNECTED Off)
+
+ set(SENTRY_VERSION 0.6.6)
+ set(SENTRY_BACKEND "breakpad")
+ set(SENTRY_BUILD_SHARED_LIBS OFF)
+
+ FetchContent_Declare(
+ sentry
+ GIT_REPOSITORY https://github.com/getsentry/sentry-native.git
+ GIT_TAG c97bcc63fa89ae557cef9c9b6e3acb11a72ff97d # v0.6.6
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ )
+ FetchContent_MakeAvailable(sentry)
+endfunction()
diff --git a/packaging/cmake/Modules/NetdataUtil.cmake b/packaging/cmake/Modules/NetdataUtil.cmake
new file mode 100644
index 000000000..c6a13309f
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataUtil.cmake
@@ -0,0 +1,146 @@
+# Utility functions used by other modules.
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+include_guard()
+
+# Determine the version of the host kernel.
+#
+# Only works on UNIX-like systems, stores the version in the cache
+# variable HOST_KERNEL_VERSION.
+function(netdata_detect_host_kernel_version)
+ if(DEFINED HOST_KERNEL_VERSION)
+ return()
+ endif()
+
+ message(CHECK_START "Determining host kernel version")
+
+ if(NOT CMAKE_CROSSCOMPILING)
+ include(CheckIncludeFile)
+
+ check_include_file("linux/version.h" CAN_USE_VERSION_H)
+
+ if(CAN_USE_VERSION_H)
+ message(CHECK_START "Checking version using linux/version.h")
+ file(WRITE "${CMAKE_BINARY_DIR}/kversion-test.c" "
+ #include <stdio.h>
+ #include <linux/version.h>
+
+ int main() {
+ printf(\"%i.%i.%i\", LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL);
+ }
+ ")
+
+ try_run(_run_success _compile_success
+ ${CMAKE_BINARY_DIR}
+ SOURCES ${CMAKE_BINARY_DIR}/kversion-test.c
+ RUN_OUTPUT_VARIABLE _kversion_output)
+
+ if(_compile_success AND _run_success EQUAL 0)
+ message(CHECK_PASS "success")
+ set(_kversion_value "${_kversion_output}")
+ else()
+ message(CHECK_FAIL "failed")
+ endif()
+ endif()
+ endif()
+
+ if(NOT DEFINED _kversion_value)
+ message(CHECK_START "Checking version using uname")
+ execute_process(COMMAND uname -r
+ RESULT_VARIABLE _uname_result
+ OUTPUT_VARIABLE _uname_output)
+
+ if(NOT _uname_result EQUAL 0)
+ message(CHECK_FAIL "failed")
+ message(CHECK_FAIL "unknown")
+ set(HOST_KERNEL_VERSION "0.0.0" CACHE STRING "Detected host kernel version")
+ return()
+ else()
+ message(CHECK_PASS "success")
+ endif()
+
+ set(_kversion_value "${_uname_output}")
+ endif()
+
+ string(REGEX REPLACE "-.+$" "" _kversion "${_kversion_value}")
+ message(CHECK_PASS "${_kversion}")
+ set(HOST_KERNEL_VERSION "${_kversion}" CACHE STRING "Detected host kernel version")
+endfunction()
+
+# Check what libc we're using.
+#
+# Sets the specified variable to the name of the libc or "unknown"
+function(netdata_identify_libc _libc_name)
+ if(NOT DEFINED _ND_DETECTED_LIBC)
+ message(CHECK_START "Detecting libc implementation using ldd")
+
+ execute_process(COMMAND ldd --version
+ COMMAND grep -q -i -E "glibc|gnu libc"
+ RESULT_VARIABLE LDD_RESULT
+ OUTPUT_VARIABLE LDD_OUTPUT
+ ERROR_VARIABLE LDD_OUTPUT)
+
+ if(NOT LDD_RESULT)
+ set(${_libc_name} glibc PARENT_SCOPE)
+ set(_ND_DETECTED_LIBC glibc CACHE INTERNAL "")
+ message(CHECK_PASS "glibc")
+ return()
+ endif()
+
+ execute_process(COMMAND sh -c "ldd --version 2>&1 | grep -q -i 'musl'"
+ RESULT_VARIABLE LDD_RESULT
+ OUTPUT_VARIABLE LDD_OUTPUT
+ ERROR_VARIABLE LDD_OUTPUT)
+
+ if(NOT LDD_RESULT)
+ set(${_libc_name} musl PARENT_SCOPE)
+ set(_ND_DETECTED_LIBC musl CACHE INTERNAL "")
+ message(CHECK_PASS "musl")
+ return()
+ endif()
+
+ message(CHECK_FAIL "unknown")
+
+ message(CHECK_START "Looking for libc.so.6")
+ find_program(LIBC_PATH libc.so.6
+ PATHS /lib /lib64 /usr/lib /usr/lib64
+ NO_DEFAULT_PATH
+ NO_PACKAGE_ROOT_PATH
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH
+ NO_CMAKE_INSTALL_PREFIX
+ NO_CMAKE_FIND_ROOT_PATH)
+
+ if(NOT "${LIBC_PATH}" STREQUAL "LIBC_PATH-NOTFOUND")
+ message(CHECK_PASS "found")
+ message(CHECK_START "Detecting libc implementation using libc.so.6")
+
+ execute_process(COMMAND "${LIBC_PATH}"
+ COMMAND head -n 1
+ COMMAND grep -q -i -E "gnu libc|gnu c library"
+ RESULT_VARIABLE LIBC_RESULT
+ OUTPUT_VARIABLE LIBC_OUTPUT
+ ERROR_VARIABLE LIBC_ERROR)
+
+ if(NOT LIBC_RESULT)
+ set(${_libc_name} glibc PARENT_SCOPE)
+ set(_ND_DETECTED_LIBC glibc CACHE INTERNAL "")
+ message(CHECK_PASS "glibc")
+ return()
+ else()
+ message(CHECK_FAIL "unknown")
+ endif()
+ else()
+ message(CHECK_FAIL "not found")
+ endif()
+
+ set(${_libc_name} unknown PARENT_SCOPE)
+ set(_ND_DETECTED_LIBC unknown CACHE INTERNAL "")
+ else()
+ set(${_libc_name} ${_ND_DETECTED_LIBC} PARENT_SCOPE)
+ endif()
+endfunction()
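+
+# Illustrative usage sketch (the variable name MY_LIBC is hypothetical):
+#
+#   netdata_detect_host_kernel_version()   # caches the result in HOST_KERNEL_VERSION
+#   netdata_identify_libc(MY_LIBC)         # sets MY_LIBC to glibc, musl, or unknown
+#   message(STATUS "kernel: ${HOST_KERNEL_VERSION}, libc: ${MY_LIBC}")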
diff --git a/packaging/cmake/Modules/NetdataVersion.cmake b/packaging/cmake/Modules/NetdataVersion.cmake
new file mode 100644
index 000000000..b4bdd43b0
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataVersion.cmake
@@ -0,0 +1,51 @@
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Function to provide information regarding the Netdata version.
+#
+# The high-level logic is: (a) use git-describe, (b) fall back to the info
+# in packaging/version. This version information is used for CMake's
+# project(), CPack's packaging, and the agent's own functionality.
+function(netdata_version)
+ find_package(Git)
+
+ if(GIT_EXECUTABLE)
+ execute_process(COMMAND ${GIT_EXECUTABLE} describe
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ RESULT_VARIABLE GIT_DESCRIBE_RESULT
+ OUTPUT_VARIABLE GIT_DESCRIBE_OUTPUT)
+ if(GIT_DESCRIBE_RESULT)
+ file(STRINGS "${CMAKE_SOURCE_DIR}/packaging/version" GIT_DESCRIBE_OUTPUT)
+ message(WARNING "using version from packaging/version: '${GIT_DESCRIBE_OUTPUT}'")
+ endif()
+ else()
+ file(STRINGS packaging/version GIT_DESCRIBE_OUTPUT)
+ message(WARNING "using version from packaging/version: '${GIT_DESCRIBE_OUTPUT}'")
+ endif()
+
+ string(STRIP ${GIT_DESCRIBE_OUTPUT} GIT_DESCRIBE_OUTPUT)
+ set(NETDATA_VERSION_STRING "${GIT_DESCRIBE_OUTPUT}" PARENT_SCOPE)
+
+ string(REGEX MATCH "v?([0-9]+)\\.([0-9]+)\\.([0-9]+)-?([0-9]+)?-?([0-9a-zA-Z]+)?" MATCHES "${GIT_DESCRIBE_OUTPUT}")
+ if(CMAKE_MATCH_COUNT EQUAL 3)
+ set(NETDATA_VERSION_MAJOR ${CMAKE_MATCH_1} PARENT_SCOPE)
+ set(NETDATA_VERSION_MINOR ${CMAKE_MATCH_2} PARENT_SCOPE)
+ set(NETDATA_VERSION_PATCH ${CMAKE_MATCH_3} PARENT_SCOPE)
+ set(NETDATA_VERSION_TWEAK 0 PARENT_SCOPE)
+ set(NETDATA_VERSION_DESCR "N/A" PARENT_SCOPE)
+ elseif(CMAKE_MATCH_COUNT EQUAL 4)
+ set(NETDATA_VERSION_MAJOR ${CMAKE_MATCH_1} PARENT_SCOPE)
+ set(NETDATA_VERSION_MINOR ${CMAKE_MATCH_2} PARENT_SCOPE)
+ set(NETDATA_VERSION_PATCH ${CMAKE_MATCH_3} PARENT_SCOPE)
+ set(NETDATA_VERSION_TWEAK ${CMAKE_MATCH_4} PARENT_SCOPE)
+ set(NETDATA_VERSION_DESCR "N/A" PARENT_SCOPE)
+ elseif(CMAKE_MATCH_COUNT EQUAL 5)
+ set(NETDATA_VERSION_MAJOR ${CMAKE_MATCH_1} PARENT_SCOPE)
+ set(NETDATA_VERSION_MINOR ${CMAKE_MATCH_2} PARENT_SCOPE)
+ set(NETDATA_VERSION_PATCH ${CMAKE_MATCH_3} PARENT_SCOPE)
+ set(NETDATA_VERSION_TWEAK ${CMAKE_MATCH_4} PARENT_SCOPE)
+ set(NETDATA_VERSION_DESCR ${CMAKE_MATCH_5} PARENT_SCOPE)
+ else()
+ message(FATAL_ERROR "Wrong version regex match count ${CMAKE_MATCH_COUNT} (should be 3, 4 or 5)")
+ endif()
+endfunction()
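+
+# Illustrative usage sketch (how a top-level CMakeLists.txt might consume the
+# variables this function sets; the exact call site is outside this module):
+#
+#   netdata_version()
+#   project(netdata
+#           VERSION "${NETDATA_VERSION_MAJOR}.${NETDATA_VERSION_MINOR}.${NETDATA_VERSION_PATCH}.${NETDATA_VERSION_TWEAK}")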
diff --git a/packaging/cmake/Modules/NetdataYAML.cmake b/packaging/cmake/Modules/NetdataYAML.cmake
new file mode 100644
index 000000000..9fc713254
--- /dev/null
+++ b/packaging/cmake/Modules/NetdataYAML.cmake
@@ -0,0 +1,82 @@
+# Functions and macros for handling of libYAML
+#
+# Copyright (c) 2024 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Handle bundling of libyaml.
+#
+# This pulls it in as a sub-project using FetchContent functionality.
+#
+# This needs to be a function and not a macro for variable scoping
+# reasons. All the things we care about from the sub-project are exposed
+# as targets, which are globally scoped and not function scoped.
+function(netdata_bundle_libyaml)
+ include(FetchContent)
+ include(NetdataFetchContentExtra)
+
+ if(ENABLE_BUNDLED_LIBYAML)
+ set(FETCHCONTENT_TRY_FIND_PACKAGE_MODE NEVER)
+ endif()
+
+ set(FETCHCONTENT_FULLY_DISCONNECTED Off)
+ set(repo https://github.com/yaml/libyaml)
+ set(tag 2c891fc7a770e8ba2fec34fc6b545c672beb37e6) # v0.2.5
+
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.28)
+ FetchContent_Declare(yaml
+ GIT_REPOSITORY ${repo}
+ GIT_TAG ${tag}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ EXCLUDE_FROM_ALL
+ )
+ else()
+ FetchContent_Declare(yaml
+ GIT_REPOSITORY ${repo}
+ GIT_TAG ${tag}
+ CMAKE_ARGS ${NETDATA_CMAKE_PROPAGATE_TOOLCHAIN_ARGS}
+ )
+ endif()
+
+ FetchContent_MakeAvailable_NoInstall(yaml)
+endfunction()
+
+# Handle setup of libyaml for the build.
+#
+# This will attempt to find libyaml using pkg_check_modules. If it finds
+# a usable copy, that will be used. If not, it will bundle a vendored copy
+# as a sub-project.
+#
+# Irrespective of how libyaml is to be included, library names,
+# include directories, and compile definitions will be specified in the
+# NETDATA_YAML_* variables for later use.
+macro(netdata_detect_libyaml)
+ set(HAVE_LIBYAML True)
+
+ pkg_check_modules(YAML yaml-0.1)
+
+ if(ENABLE_BUNDLED_LIBYAML OR NOT YAML_FOUND)
+ netdata_bundle_libyaml()
+ set(ENABLE_BUNDLED_LIBYAML True PARENT_SCOPE)
+ set(NETDATA_YAML_LDFLAGS yaml)
+ get_target_property(NETDATA_YAML_INCLUDE_DIRS yaml INTERFACE_INCLUDE_DIRECTORIES)
+ get_target_property(NETDATA_YAML_CFLAGS_OTHER yaml INTERFACE_COMPILE_DEFINITIONS)
+ else()
+ set(NETDATA_YAML_LDFLAGS ${YAML_LDFLAGS})
+ set(NETDATA_YAML_CFLAGS_OTHER ${YAML_CFLAGS_OTHER})
+ set(NETDATA_YAML_INCLUDE_DIRS ${YAML_INCLUDE_DIRS})
+ endif()
+endmacro()
+
+# Add libyaml as a public link dependency of the specified target.
+#
+# The specified target must already exist, and the netdata_detect_libyaml
+# macro must have already been run at least once for this to work correctly.
+function(netdata_add_libyaml_to_target _target)
+ if(ENABLE_BUNDLED_LIBYAML)
+ target_include_directories(${_target} BEFORE PUBLIC ${NETDATA_YAML_INCLUDE_DIRS})
+ else()
+ target_include_directories(${_target} PUBLIC ${NETDATA_YAML_INCLUDE_DIRS})
+ endif()
+ target_compile_options(${_target} PUBLIC ${NETDATA_YAML_CFLAGS_OTHER})
+ target_link_libraries(${_target} PUBLIC ${NETDATA_YAML_LDFLAGS})
+endfunction()
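+
+# Illustrative usage sketch (my_target is hypothetical):
+#
+#   netdata_detect_libyaml()                 # find system libyaml or fall back to the bundled copy
+#   netdata_add_libyaml_to_target(my_target)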
diff --git a/packaging/cmake/Modules/Packaging.cmake b/packaging/cmake/Modules/Packaging.cmake
new file mode 100644
index 000000000..663dbe27c
--- /dev/null
+++ b/packaging/cmake/Modules/Packaging.cmake
@@ -0,0 +1,479 @@
+#
+# CPack options
+#
+
+if(NETDATA_VERSION_DESCR STREQUAL "N/A")
+ set(CPACK_PACKAGE_VERSION ${NETDATA_VERSION_MAJOR}.${NETDATA_VERSION_MINOR}.${NETDATA_VERSION_PATCH})
+else()
+ set(CPACK_PACKAGE_VERSION ${NETDATA_VERSION_MAJOR}.${NETDATA_VERSION_MINOR}.${NETDATA_VERSION_PATCH}-${NETDATA_VERSION_TWEAK}-${NETDATA_VERSION_DESCR})
+endif()
+
+set(CPACK_THREADS 0)
+
+set(CPACK_STRIP_FILES NO)
+set(CPACK_DEBIAN_DEBUGINFO_PACKAGE NO)
+
+set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS YES)
+
+set(CPACK_PACKAGING_INSTALL_PREFIX "/")
+
+set(CPACK_PACKAGE_VENDOR "Netdata Inc.")
+
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/LICENSE")
+set(CPACK_RESOURCE_FILE_README "${CMAKE_SOURCE_DIR}/README.md")
+
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "netdata")
+set(CPACK_PACKAGE_DIRECTORY "${CMAKE_BINARY_DIR}/packages")
+
+# to silence lintian
+set(CPACK_INSTALL_DEFAULT_DIRECTORY_PERMISSIONS
+ OWNER_READ OWNER_WRITE OWNER_EXECUTE
+ GROUP_READ GROUP_EXECUTE
+ WORLD_READ WORLD_EXECUTE)
+
+#
+# Debian options
+#
+
+set(CPACK_DEB_COMPONENT_INSTALL YES)
+set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS YES)
+set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
+
+set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Netdata Builder <bot@netdata.cloud>")
+
+#
+# netdata
+#
+
+set(CPACK_COMPONENT_NETDATA_DESCRIPTION
+ "real-time charts for system monitoring
+ Netdata is a daemon that collects data in real time (per second)
+ and presents a website to view and analyze it. The presentation
+ is also real-time and full of interactive charts that precisely
+ render all collected values.")
+
+set(CPACK_DEBIAN_NETDATA_PACKAGE_NAME "netdata")
+set(CPACK_DEBIAN_NETDATA_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_NETDATA_PACKAGE_PREDEPENDS "adduser, libcap2-bin")
+set(CPACK_DEBIAN_NETDATA_PACKAGE_SUGGESTS
+ "netdata-plugin-cups, netdata-plugin-freeipmi")
+set(CPACK_DEBIAN_NETDATA_PACKAGE_RECOMMENDS
+ "netdata-plugin-systemd-journal, \
+netdata-plugin-network-viewer")
+set(CPACK_DEBIAN_NETDATA_PACKAGE_CONFLICTS
+ "netdata-core, netdata-plugins-bash, netdata-plugins-python, netdata-web")
+
+list(APPEND _main_deps "netdata-plugin-chartsd")
+list(APPEND _main_deps "netdata-plugin-pythond")
+
+if(ENABLE_PLUGIN_APPS)
+ list(APPEND _main_deps "netdata-plugin-apps")
+endif()
+
+if(ENABLE_PLUGIN_GO)
+ list(APPEND _main_deps "netdata-plugin-go")
+endif()
+
+if(ENABLE_PLUGIN_DEBUGFS)
+ list(APPEND _main_deps "netdata-plugin-debugfs")
+endif()
+
+if(ENABLE_PLUGIN_NFACCT)
+ list(APPEND _main_deps "netdata-plugin-nfacct")
+endif()
+
+if(ENABLE_PLUGIN_SLABINFO)
+ list(APPEND _main_deps "netdata-plugin-slabinfo")
+endif()
+
+if(ENABLE_PLUGIN_PERF)
+ list(APPEND _main_deps "netdata-plugin-perf")
+endif()
+
+if(ENABLE_PLUGIN_EBPF)
+ list(APPEND _main_deps "netdata-plugin-ebpf")
+endif()
+
+list(JOIN _main_deps ", " CPACK_DEBIAN_NETDATA_PACKAGE_DEPENDS)
+
+set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/netdata/conffiles;"
+ "${PKG_FILES_PATH}/deb/netdata/preinst"
+ "${PKG_FILES_PATH}/deb/netdata/postinst"
+ "${PKG_FILES_PATH}/deb/netdata/postrm")
+
+set(CPACK_DEBIAN_NETDATA_DEBUGINFO_PACKAGE On)
+
+#
+# apps.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-APPS_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-APPS_DESCRIPTION
+ "The per-application metrics collector plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect per-application and
+ per-user metrics without using cgroups.")
+
+set(CPACK_DEBIAN_PLUGIN-APPS_PACKAGE_NAME "netdata-plugin-apps")
+set(CPACK_DEBIAN_PLUGIN-APPS_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-APPS_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-APPS_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+
+set(CPACK_DEBIAN_PLUGIN-APPS_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-apps/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-apps/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-APPS_DEBUGINFO_PACKAGE On)
+
+#
+# charts.d.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-CHARTSD_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-CHARTSD_DESCRIPTION
+ "The charts.d metrics collection plugin for the Netdata Agent
+ This plugin adds a selection of additional collectors written in shell
+ script to the Netdata Agent. It includes collectors for APCUPSD,
+ LibreSWAN, OpenSIPS, and Wireless access point statistics.")
+
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_NAME "netdata-plugin-chartsd")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_PREDEPENDS "adduser")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_DEPENDS "bash")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_ARCHITECTURE "all")
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_SUGGESTS "apcupsd, iw, sudo")
+
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-chartsd/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-chartsd/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-CHARTSD_DEBUGINFO_PACKAGE Off)
+
+#
+# cups.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-CUPS_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-CUPS_DESCRIPTION
+ "The CUPS metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the Common UNIX Printing System.")
+
+set(CPACK_DEBIAN_PLUGIN-CUPS_PACKAGE_NAME "netdata-plugin-cups")
+set(CPACK_DEBIAN_PLUGIN-CUPS_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-CUPS_PACKAGE_PREDEPENDS "adduser")
+set(CPACK_DEBIAN_PLUGIN-CUPS_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-cups/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-cups/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-CUPS_DEBUGINFO_PACKAGE On)
+
+#
+# debugfs.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-DEBUGFS_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-DEBUGFS_DESCRIPTION
+ "The debugfs metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect Linux kernel metrics
+ exposed through debugfs.")
+
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_PACKAGE_NAME "netdata-plugin-debugfs")
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-debugfs/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-debugfs/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-DEBUGFS_DEBUGINFO_PACKAGE On)
+
+#
+# ebpf.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-EBPF_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-EBPF_DESCRIPTION
+ "The eBPF metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to use eBPF code to collect more
+ detailed kernel-level metrics for the system.")
+
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_NAME "netdata-plugin-ebpf")
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_PREDEPENDS "adduser")
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_RECOMMENDS "netdata-plugin-apps (= ${CPACK_PACKAGE_VERSION}), netdata-ebpf-code-legacy (= ${CPACK_PACKAGE_VERSION})")
+
+set(CPACK_DEBIAN_PLUGIN-EBPF_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-ebpf/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-ebpf/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-EBPF_DEBUGINFO_PACKAGE On)
+
+#
+# ebpf-code-legacy
+#
+
+set(CPACK_COMPONENT_EBPF-CODE-LEGACY_DEPENDS "netdata")
+set(CPACK_COMPONENT_EBPF-CODE-LEGACY_DESCRIPTION
+ "Compiled eBPF legacy code for the Netdata eBPF plugin
+ This package provides the pre-compiled eBPF legacy code for use by
+ the Netdata eBPF plugin. This code is only needed when using the eBPF
+ plugin with kernels that do not include BTF support (mostly kernel
+ versions lower than 5.10).")
+
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_NAME "netdata-ebpf-code-legacy")
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_PREDEPENDS "adduser")
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_RECOMMENDS "netdata-plugin-ebpf (= ${CPACK_PACKAGE_VERSION})")
+
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/ebpf-code-legacy/preinst;"
+ "${PKG_FILES_PATH}/deb/ebpf-code-legacy/postinst")
+
+set(CPACK_DEBIAN_EBPF-CODE-LEGACY_DEBUGINFO_PACKAGE Off)
+
+#
+# freeipmi.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-FREEIPMI_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-FREEIPMI_DESCRIPTION
+ "The FreeIPMI metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from hardware
+ using FreeIPMI.")
+
+set(CPACK_DEBIAN_PLUGIN-FREEIPMI_PACKAGE_NAME "netdata-plugin-freeipmi")
+set(CPACK_DEBIAN_PLUGIN-FREEIPMI_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-FREEIPMI_PACKAGE_PREDEPENDS "adduser")
+
+set(CPACK_DEBIAN_PLUGIN-FREEIPMI_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-freeipmi/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-freeipmi/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-FREEIPMI_DEBUGINFO_PACKAGE On)
+
+#
+# go.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-GO_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-GO_DESCRIPTION
+ "The go.d metrics collection plugin for the Netdata Agent
+ This plugin adds a selection of additional collectors written in Go to
+ the Netdata Agent. A significant percentage of the application-specific
+ collectors provided by Netdata are part of this plugin, so most users
+ will want it installed.")
+
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_NAME "netdata-plugin-go")
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_SUGGESTS "nvme-cli")
+
+set(CPACK_DEBIAN_PLUGIN-GO_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-go/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-go/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-GO_DEBUGINFO_PACKAGE Off)
+
+#
+# network-viewer.plugin
+#
+
+# TODO: recommends netdata-plugin-ebpf
+set(CPACK_COMPONENT_PLUGIN-NETWORK-VIEWER_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-NETWORK-VIEWER_DESCRIPTION
+ "The network viewer plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to provide network connection
+ mapping functionality for use in Netdata Cloud.")
+
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_PACKAGE_NAME "netdata-plugin-network-viewer")
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_PACKAGE_RECOMMENDS "netdata-plugin-ebpf (= ${CPACK_PACKAGE_VERSION})")
+
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-network-viewer/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-network-viewer/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-NETWORK-VIEWER_DEBUGINFO_PACKAGE On)
+
+#
+# nfacct.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-NFACCT_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-NFACCT_DESCRIPTION
+ "The NFACCT metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the firewall
+ using NFACCT objects.")
+
+set(CPACK_DEBIAN_PLUGIN-NFACCT_PACKAGE_NAME "netdata-plugin-nfacct")
+set(CPACK_DEBIAN_PLUGIN-NFACCT_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-NFACCT_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-NFACCT_PACKAGE_PREDEPENDS "adduser")
+
+set(CPACK_DEBIAN_PLUGIN-NFACCT_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-nfacct/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-nfacct/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-NFACCT_DEBUGINFO_PACKAGE On)
+
+#
+# perf.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-PERF_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-PERF_DESCRIPTION
+ "The perf metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the Linux perf
+ subsystem.")
+
+set(CPACK_DEBIAN_PLUGIN-PERF_PACKAGE_NAME "netdata-plugin-perf")
+set(CPACK_DEBIAN_PLUGIN-PERF_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-PERF_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-PERF_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+
+set(CPACK_DEBIAN_PLUGIN-PERF_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-perf/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-perf/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-PERF_DEBUGINFO_PACKAGE On)
+
+#
+# pythond.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-PYTHOND_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-PYTHOND_DESCRIPTION
+ "The python.d metrics collection plugin for the Netdata Agent
+ Many of the collectors provided by this package are also available
+ in netdata-plugin-go. In most cases, you probably want to use those
+ versions instead of the Python versions.")
+
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_NAME "netdata-plugin-pythond")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_PREDEPENDS "adduser")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_SUGGESTS "sudo")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_DEPENDS "python3")
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_ARCHITECTURE "all")
+
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-pythond/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-pythond/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-PYTHOND_DEBUGINFO_PACKAGE Off)
+
+#
+# slabinfo.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-SLABINFO_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-SLABINFO_DESCRIPTION
+ "The slabinfo metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect performance and
+ utilization metrics for the Linux kernel’s SLAB allocator.")
+
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_PACKAGE_NAME "netdata-plugin-slabinfo")
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-slabinfo/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-slabinfo/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-SLABINFO_DEBUGINFO_PACKAGE On)
+
+#
+# systemd-journal.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-SYSTEMD-JOURNAL_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-SYSTEMD-JOURNAL_DESCRIPTION
+ "The systemd-journal collector for the Netdata Agent
+ This plugin allows the Netdata Agent to present logs from the systemd
+ journal on Netdata Cloud or the local Agent dashboard.")
+
+set(CPACK_DEBIAN_PLUGIN-SYSTEMD-JOURNAL_PACKAGE_NAME "netdata-plugin-systemd-journal")
+set(CPACK_DEBIAN_PLUGIN-SYSTEMD-JOURNAL_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-SYSTEMD-JOURNAL_PACKAGE_PREDEPENDS "libcap2-bin, adduser")
+
+set(CPACK_DEBIAN_PLUGIN-SYSTEMD-JOURNAL_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-systemd-journal/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-systemd-journal/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-SYSTEMD-JOURNAL_DEBUGINFO_PACKAGE On)
+
+#
+# xenstat.plugin
+#
+
+set(CPACK_COMPONENT_PLUGIN-XENSTAT_DEPENDS "netdata")
+set(CPACK_COMPONENT_PLUGIN-XENSTAT_DESCRIPTION
+ "The xenstat plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the Xen
+ Hypervisor.")
+
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_PACKAGE_NAME "netdata-plugin-xenstat")
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_PACKAGE_SECTION "net")
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_PACKAGE_CONFLICTS "netdata (<< 1.40)")
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_PACKAGE_PREDEPENDS "adduser")
+
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_PACKAGE_CONTROL_EXTRA
+ "${PKG_FILES_PATH}/deb/plugin-xenstat/preinst;"
+ "${PKG_FILES_PATH}/deb/plugin-xenstat/postinst")
+
+set(CPACK_DEBIAN_PLUGIN-XENSTAT_DEBUGINFO_PACKAGE On)
+
+#
+# CPack components
+#
+
+list(APPEND CPACK_COMPONENTS_ALL "netdata")
+if(ENABLE_PLUGIN_APPS)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-apps")
+endif()
+list(APPEND CPACK_COMPONENTS_ALL "plugin-chartsd")
+if(ENABLE_PLUGIN_CUPS)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-cups")
+endif()
+if(ENABLE_PLUGIN_DEBUGFS)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-debugfs")
+endif()
+if(ENABLE_PLUGIN_EBPF)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-ebpf")
+endif()
+if(ENABLE_EBPF_LEGACY_PROGRAMS)
+ list(APPEND CPACK_COMPONENTS_ALL "ebpf-code-legacy")
+endif()
+if(ENABLE_PLUGIN_FREEIPMI)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-freeipmi")
+endif()
+if(ENABLE_PLUGIN_GO)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-go")
+endif()
+if(ENABLE_PLUGIN_NETWORK_VIEWER)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-network-viewer")
+endif()
+if(ENABLE_PLUGIN_NFACCT)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-nfacct")
+endif()
+if(ENABLE_PLUGIN_PERF)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-perf")
+endif()
+list(APPEND CPACK_COMPONENTS_ALL "plugin-pythond")
+if(ENABLE_PLUGIN_SLABINFO)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-slabinfo")
+endif()
+if(ENABLE_PLUGIN_SYSTEMD_JOURNAL)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-systemd-journal")
+endif()
+if(ENABLE_PLUGIN_XENSTAT)
+ list(APPEND CPACK_COMPONENTS_ALL "plugin-xenstat")
+endif()
+
+include(CPack)
diff --git a/packaging/cmake/config.cmake.h.in b/packaging/cmake/config.cmake.h.in
new file mode 100644
index 000000000..57d032693
--- /dev/null
+++ b/packaging/cmake/config.cmake.h.in
@@ -0,0 +1,194 @@
+/* This file was generated by CMake from config.cmake.h.in */
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#define __always_unused __attribute__((unused))
+#define __maybe_unused __attribute__((unused))
+
+#cmakedefine SIZEOF_VOID_P ${SIZEOF_VOID_P}
+
+// platform
+#cmakedefine OS_FREEBSD
+#cmakedefine OS_LINUX
+#cmakedefine OS_MACOS
+#cmakedefine OS_WINDOWS
+
+// checked headers
+
+#cmakedefine HAVE_NETINET_IN_H
+#cmakedefine HAVE_RESOLV_H
+#cmakedefine HAVE_NETDB_H
+#cmakedefine HAVE_SYS_PRCTL_H
+#cmakedefine HAVE_SYS_STAT_H
+#cmakedefine HAVE_SYS_VFS_H
+#cmakedefine HAVE_SYS_STATFS_H
+#cmakedefine HAVE_LINUX_MAGIC_H
+#cmakedefine HAVE_SYS_MOUNT_H
+#cmakedefine HAVE_SYS_STATVFS_H
+#cmakedefine HAVE_INTTYPES_H
+#cmakedefine HAVE_STDINT_H
+#cmakedefine HAVE_SYS_CAPABILITY_H
+#cmakedefine HAVE_ARPA_INET_H
+#cmakedefine HAVE_NETINET_TCP_H
+#cmakedefine HAVE_SYS_IOCTL_H
+#cmakedefine HAVE_GRP_H
+#cmakedefine HAVE_PWD_H
+#cmakedefine HAVE_NET_IF_H
+#cmakedefine HAVE_POLL_H
+#cmakedefine HAVE_SYSLOG_H
+#cmakedefine HAVE_SYS_MMAN_H
+#cmakedefine HAVE_SYS_RESOURCE_H
+#cmakedefine HAVE_SYS_SOCKET_H
+#cmakedefine HAVE_SYS_WAIT_H
+#cmakedefine HAVE_SYS_UN_H
+#cmakedefine HAVE_SPAWN_H
+
+#cmakedefine HAVE_CAPABILITY
+#cmakedefine HAVE_PROTOBUF
+#cmakedefine BUNDLED_PROTOBUF
+#cmakedefine HAVE_MONGOC
+#cmakedefine HAVE_LIBDATACHANNEL
+
+// checked symbols
+
+#cmakedefine MAJOR_IN_SYSMACROS
+#cmakedefine MAJOR_IN_MKDEV
+#cmakedefine HAVE_CLOCK_GETTIME
+#cmakedefine HAVE_STRERROR_R
+#cmakedefine HAVE_FINITE
+#cmakedefine HAVE_ISFINITE
+#cmakedefine HAVE_RECVMMSG
+#cmakedefine HAVE_PTHREAD_GETTHREADID_NP
+#cmakedefine HAVE_PTHREAD_THREADID_NP
+#cmakedefine HAVE_GETTID
+#cmakedefine HAVE_WAITID
+#cmakedefine HAVE_NICE
+#cmakedefine HAVE_GETPRIORITY
+#cmakedefine HAVE_SETENV
+#cmakedefine HAVE_DLSYM
+
+#cmakedefine HAVE_BACKTRACE
+#cmakedefine HAVE_CLOSE_RANGE
+#cmakedefine HAVE_SCHED_GETSCHEDULER
+#cmakedefine HAVE_SCHED_SETSCHEDULER
+#cmakedefine HAVE_SCHED_GET_PRIORITY_MIN
+#cmakedefine HAVE_SCHED_GET_PRIORITY_MAX
+
+#cmakedefine HAVE_SYSTEMD
+#cmakedefine HAVE_SD_JOURNAL_OS_ROOT
+#cmakedefine HAVE_SD_JOURNAL_OPEN_FILES_FD
+#cmakedefine HAVE_SD_JOURNAL_RESTART_FIELDS
+#cmakedefine HAVE_SD_JOURNAL_GET_SEQNUM
+#cmakedefine ENABLE_SYSTEMD_DBUS
+
+// checked source compilation
+
+#cmakedefine HAVE_PTHREAD_GETNAME_NP
+#cmakedefine HAVE_ACCEPT4
+#cmakedefine STRERROR_R_CHAR_P
+#cmakedefine HAVE_C__GENERIC
+#cmakedefine HAVE_C_MALLOPT
+#cmakedefine HAVE_SETNS
+#cmakedefine HAVE_STRNDUP
+#cmakedefine SSL_HAS_PENDING
+
+#cmakedefine HAVE_FUNC_ATTRIBUTE_FORMAT_GNU_PRINTF
+#cmakedefine HAVE_FUNC_ATTRIBUTE_FORMAT_PRINTF
+#cmakedefine HAVE_FUNC_ATTRIBUTE_MALLOC
+#cmakedefine HAVE_FUNC_ATTRIBUTE_NOINLINE
+#cmakedefine HAVE_FUNC_ATTRIBUTE_NORETURN
+#cmakedefine HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#cmakedefine HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+
+// enabled features
+
+#cmakedefine ENABLE_OPENSSL
+#cmakedefine ENABLE_CLOUD
+#cmakedefine ENABLE_ACLK
+#cmakedefine ENABLE_ML
+#cmakedefine ENABLE_EXPORTING_MONGODB
+#cmakedefine ENABLE_H2O
+#cmakedefine ENABLE_DBENGINE
+#cmakedefine ENABLE_HTTPS
+#cmakedefine ENABLE_LZ4
+#cmakedefine ENABLE_ZSTD
+#cmakedefine ENABLE_BROTLI
+
+#cmakedefine ENABLE_LOGSMANAGEMENT
+#cmakedefine ENABLE_LOGSMANAGEMENT_TESTS
+#cmakedefine ENABLE_PROMETHEUS_REMOTE_WRITE
+
+// enabled plugins
+
+#cmakedefine ENABLE_PLUGIN_DEBUGFS
+#cmakedefine ENABLE_PLUGIN_APPS
+#cmakedefine ENABLE_PLUGIN_FREEIPMI
+#cmakedefine ENABLE_PLUGIN_NFACCT
+#cmakedefine ENABLE_PLUGIN_XENSTAT
+#cmakedefine ENABLE_PLUGIN_PERF
+#cmakedefine ENABLE_PLUGIN_SLABINFO
+#cmakedefine ENABLE_PLUGIN_CUPS
+#cmakedefine ENABLE_PLUGIN_CGROUP_NETWORK
+#cmakedefine ENABLE_PLUGIN_EBPF
+
+// enabled sentry
+
+#cmakedefine ENABLE_SENTRY
+#cmakedefine NETDATA_SENTRY_ENVIRONMENT "@NETDATA_SENTRY_ENVIRONMENT@"
+#cmakedefine NETDATA_SENTRY_DIST "@NETDATA_SENTRY_DIST@"
+#cmakedefine NETDATA_SENTRY_DSN "@NETDATA_SENTRY_DSN@"
+
+// enabled bundling
+
+#cmakedefine ENABLE_BUNDLED_JSONC
+#cmakedefine ENABLE_BUNDLED_YAML
+#cmakedefine ENABLE_BUNDLED_PROTOBUF
+
+// directory paths
+
+#cmakedefine CACHE_DIR "@CACHE_DIR@"
+#cmakedefine CONFIG_DIR "@CONFIG_DIR@"
+#cmakedefine LIBCONFIG_DIR "@LIBCONFIG_DIR@"
+#cmakedefine LOG_DIR "@LOG_DIR@"
+#cmakedefine PLUGINS_DIR "@PLUGINS_DIR@"
+#cmakedefine WEB_DIR "@WEB_DIR@"
+#cmakedefine VARLIB_DIR "@VARLIB_DIR@"
+
+// config command, user and version
+
+#cmakedefine CONFIGURE_COMMAND "@CONFIGURE_COMMAND@"
+#cmakedefine NETDATA_USER "@NETDATA_USER@"
+
+#define NETDATA_VERSION_MAJOR "@NETDATA_VERSION_MAJOR@"
+#define NETDATA_VERSION_MINOR "@NETDATA_VERSION_MINOR@"
+#define NETDATA_VERSION_PATCH "@NETDATA_VERSION_PATCH@"
+#define NETDATA_VERSION_TWEAK "@NETDATA_VERSION_TWEAK@"
+#define NETDATA_VERSION_DESCR "@NETDATA_VERSION_DESCR@"
+
+#define NETDATA_VERSION "@NETDATA_VERSION_STRING@"
+
+#define ENABLE_JSONC 1
+
+#cmakedefine HAVE_LIBYAML
+#cmakedefine HAVE_LIBMNL
+
+// /* Enable GNU extensions on systems that have them. */
+// #ifndef _GNU_SOURCE
+// # define _GNU_SOURCE 1
+// #endif
+
+// #cmakedefine HAVE_CRYPTO
+
+// #cmakedefine ENABLE_PROMETHEUS_REMOTE_WRITE
+
+// /* NSA spy stuff */
+// #define ENABLE_HTTPS 1
+// #cmakedefine01 HAVE_X509_VERIFY_PARAM_set1_host
+
+#define HAVE_CRYPTO
+#define HAVE_X509_VERIFY_PARAM_set1_host 1
+
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1
+#endif
diff --git a/contrib/debian/copyright b/packaging/cmake/pkg-files/copyright
index 085580ea1..085580ea1 100644
--- a/contrib/debian/copyright
+++ b/packaging/cmake/pkg-files/copyright
diff --git a/packaging/cmake/pkg-files/deb/ebpf-code-legacy/postinst b/packaging/cmake/pkg-files/deb/ebpf-code-legacy/postinst
new file mode 100755
index 000000000..d6fe86723
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/ebpf-code-legacy/postinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-ebpf-code-legacy.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/ebpf-code-legacy/preinst b/packaging/cmake/pkg-files/deb/ebpf-code-legacy/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/ebpf-code-legacy/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/contrib/debian/conffiles b/packaging/cmake/pkg-files/deb/netdata/conffiles
index 1ec902547..1ec902547 100644
--- a/contrib/debian/conffiles
+++ b/packaging/cmake/pkg-files/deb/netdata/conffiles
diff --git a/contrib/debian/netdata.default b/packaging/cmake/pkg-files/deb/netdata/etc/default/netdata
index 0bc847fe7..0bc847fe7 100644
--- a/contrib/debian/netdata.default
+++ b/packaging/cmake/pkg-files/deb/netdata/etc/default/netdata
diff --git a/contrib/debian/netdata.init b/packaging/cmake/pkg-files/deb/netdata/etc/init.d/netdata
index c2706caa5..c2706caa5 100755
--- a/contrib/debian/netdata.init
+++ b/packaging/cmake/pkg-files/deb/netdata/etc/init.d/netdata
diff --git a/packaging/cmake/pkg-files/deb/netdata/postinst b/packaging/cmake/pkg-files/deb/netdata/postinst
new file mode 100755
index 000000000..97593c23b
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/netdata/postinst
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ if ! dpkg-statoverride --list /var/lib/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/lib/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/cache/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/cache/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/run/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/run/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/log/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata adm 02750 /var/log/netdata
+ fi
+
+ if ! dpkg-statoverride --list /usr/share/netdata/www > /dev/null 2>&1; then
+ dpkg-statoverride --update --add root netdata 0755 /usr/share/netdata/www
+ fi
+
+ dpkg-statoverride --force --update --add root netdata 0775 /var/lib/netdata/registry > /dev/null 2>&1
+
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata.list | xargs -n 30 chown root:netdata
+
+ for f in ndsudo cgroup-network local-listeners ioping.plugin; do
+ chmod 4750 "/usr/libexec/netdata/plugins.d/${f}" || true
+ done
+
+ ;;
+esac
+
+if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
+ deb-systemd-helper unmask 'netdata.service' >/dev/null || true
+
+ if deb-systemd-helper --quiet was-enabled 'netdata.service'; then
+ deb-systemd-helper enable 'netdata.service' >/dev/null || true
+ else
+ deb-systemd-helper update-state 'netdata.service' >/dev/null || true
+ fi
+
+ if [ -z "${DPKG_ROOT:-}" ] && [ -d /run/systemd/system ]; then
+ systemctl --system daemon-reload >/dev/null || true
+ deb-systemd-invoke restart 'netdata.service' >/dev/null || true
+ fi
+fi
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/netdata/postrm b/packaging/cmake/pkg-files/deb/netdata/postrm
new file mode 100755
index 000000000..7a636863b
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/netdata/postrm
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ remove) ;;
+
+ purge)
+ if dpkg-statoverride --list | grep -qw /var/cache/netdata; then
+ dpkg-statoverride --remove /var/cache/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then
+ dpkg-statoverride --remove /var/lib/netdata/www
+ fi
+
+ if dpkg-statoverride --list | grep -qw /usr/share/netdata/www; then
+ dpkg-statoverride --remove /usr/share/netdata/www
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata/registry; then
+ dpkg-statoverride --remove /var/lib/netdata/registry
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata; then
+ dpkg-statoverride --remove /var/lib/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/run/netdata; then
+ dpkg-statoverride --remove /var/run/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/log/netdata; then
+ dpkg-statoverride --remove /var/log/netdata
+ fi
+ ;;
+
+ *) ;;
+
+esac
+
+if [ "$1" = "remove" ]; then
+ if [ -x "/usr/bin/deb-systemd-helper" ]; then
+ deb-systemd-helper mask 'netdata.service' >/dev/null || true
+ fi
+fi
+
+if [ "$1" = "purge" ]; then
+ if [ -x "/usr/bin/deb-systemd-helper" ]; then
+ deb-systemd-helper purge 'netdata.service' >/dev/null || true
+ deb-systemd-helper unmask 'netdata.service' >/dev/null || true
+ fi
+fi
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/netdata/preinst b/packaging/cmake/pkg-files/deb/netdata/preinst
new file mode 100755
index 000000000..6dcf201d6
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/netdata/preinst
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+
+ if ! getent passwd netdata > /dev/null; then
+ adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata
+ fi
+
+ for item in docker nginx varnish haproxy adm nsd proxy squid ceph nobody I2C; do
+ if getent group $item > /dev/null 2>&1; then
+ usermod -a -G $item netdata
+ fi
+ done
+ # Netdata must be able to read /etc/pve/qemu-server/* and /etc/pve/lxc/*
+ # for reading VMs/containers names, CPU and memory limits on Proxmox.
+ if [ -d "/etc/pve" ] && getent group "www-data" > /dev/null 2>&1; then
+ usermod -a -G www-data netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-apps/postinst b/packaging/cmake/pkg-files/deb/plugin-apps/postinst
new file mode 100755
index 000000000..f4621e595
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-apps/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/apps.plugin
+ if ! setcap "cap_dac_read_search=eip cap_sys_ptrace=eip" /usr/libexec/netdata/plugins.d/apps.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/apps.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-apps/preinst b/packaging/cmake/pkg-files/deb/plugin-apps/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-apps/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-chartsd/postinst b/packaging/cmake/pkg-files/deb/plugin-chartsd/postinst
new file mode 100755
index 000000000..eddb51894
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-chartsd/postinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-plugin-chartsd.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-chartsd/preinst b/packaging/cmake/pkg-files/deb/plugin-chartsd/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-chartsd/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-cups/postinst b/packaging/cmake/pkg-files/deb/plugin-cups/postinst
new file mode 100755
index 000000000..2490a4729
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-cups/postinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/cups.plugin
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-cups/preinst b/packaging/cmake/pkg-files/deb/plugin-cups/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-cups/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-debugfs/postinst b/packaging/cmake/pkg-files/deb/plugin-debugfs/postinst
new file mode 100755
index 000000000..e07ed60ff
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-debugfs/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/debugfs.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/debugfs.plugin
+ if ! setcap "cap_dac_read_search=eip" /usr/libexec/netdata/plugins.d/debugfs.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/debugfs.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-debugfs/preinst b/packaging/cmake/pkg-files/deb/plugin-debugfs/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-debugfs/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-ebpf/postinst b/packaging/cmake/pkg-files/deb/plugin-ebpf/postinst
new file mode 100755
index 000000000..767702282
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-ebpf/postinst
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-plugin-ebpf.list | xargs -n 30 chown root:netdata
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/ebpf.plugin
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-ebpf/preinst b/packaging/cmake/pkg-files/deb/plugin-ebpf/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-ebpf/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-freeipmi/postinst b/packaging/cmake/pkg-files/deb/plugin-freeipmi/postinst
new file mode 100755
index 000000000..956c9c0de
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-freeipmi/postinst
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/freeipmi.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/freeipmi.plugin
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-freeipmi/preinst b/packaging/cmake/pkg-files/deb/plugin-freeipmi/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-freeipmi/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-go/postinst b/packaging/cmake/pkg-files/deb/plugin-go/postinst
new file mode 100755
index 000000000..2b60067ad
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-go/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/go.d.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/go.d.plugin
+ if ! setcap "cap_dac_read_search+epi cap_net_admin=eip cap_net_raw=eip" /usr/libexec/netdata/plugins.d/go.d.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/go.d.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-go/preinst b/packaging/cmake/pkg-files/deb/plugin-go/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-go/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-network-viewer/postinst b/packaging/cmake/pkg-files/deb/plugin-network-viewer/postinst
new file mode 100755
index 000000000..a388ded91
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-network-viewer/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/network-viewer.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/network-viewer.plugin
+ if ! setcap "cap_dac_read_search,cap_sys_admin,cap_sys_ptrace=eip" /usr/libexec/netdata/plugins.d/network-viewer.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/network-viewer.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-network-viewer/preinst b/packaging/cmake/pkg-files/deb/plugin-network-viewer/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-network-viewer/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-nfacct/postinst b/packaging/cmake/pkg-files/deb/plugin-nfacct/postinst
new file mode 100755
index 000000000..d3c8e0d25
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-nfacct/postinst
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/nfacct.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/nfacct.plugin
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-nfacct/preinst b/packaging/cmake/pkg-files/deb/plugin-nfacct/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-nfacct/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-perf/postinst b/packaging/cmake/pkg-files/deb/plugin-perf/postinst
new file mode 100755
index 000000000..f39d443fc
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-perf/postinst
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/perf.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/perf.plugin
+
+ if ! setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin 2>/dev/null; then
+ if ! setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin 2>/dev/null; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/perf.plugin
+ fi
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-perf/preinst b/packaging/cmake/pkg-files/deb/plugin-perf/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-perf/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-pythond/postinst b/packaging/cmake/pkg-files/deb/plugin-pythond/postinst
new file mode 100755
index 000000000..5a8e8c694
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-pythond/postinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-plugin-pythond.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-pythond/preinst b/packaging/cmake/pkg-files/deb/plugin-pythond/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-pythond/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-slabinfo/postinst b/packaging/cmake/pkg-files/deb/plugin-slabinfo/postinst
new file mode 100755
index 000000000..149764469
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-slabinfo/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ if ! setcap "cap_dac_read_search=eip" /usr/libexec/netdata/plugins.d/slabinfo.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-slabinfo/preinst b/packaging/cmake/pkg-files/deb/plugin-slabinfo/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-slabinfo/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-systemd-journal/postinst b/packaging/cmake/pkg-files/deb/plugin-systemd-journal/postinst
new file mode 100755
index 000000000..9a2e94fc9
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-systemd-journal/postinst
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/systemd-journal.plugin
+ chmod 0750 /usr/libexec/netdata/plugins.d/systemd-journal.plugin
+ if ! setcap "cap_dac_read_search=eip" /usr/libexec/netdata/plugins.d/systemd-journal.plugin; then
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/systemd-journal.plugin
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-systemd-journal/preinst b/packaging/cmake/pkg-files/deb/plugin-systemd-journal/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-systemd-journal/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/cmake/pkg-files/deb/plugin-xenstat/postinst b/packaging/cmake/pkg-files/deb/plugin-xenstat/postinst
new file mode 100755
index 000000000..c7bb35df0
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-xenstat/postinst
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/xenstat.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/xenstat.plugin
+ ;;
+esac
+
+exit 0
diff --git a/packaging/cmake/pkg-files/deb/plugin-xenstat/preinst b/packaging/cmake/pkg-files/deb/plugin-xenstat/preinst
new file mode 100755
index 000000000..57615ec06
--- /dev/null
+++ b/packaging/cmake/pkg-files/deb/plugin-xenstat/preinst
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
diff --git a/packaging/current_libbpf.checksums b/packaging/current_libbpf.checksums
deleted file mode 100644
index 8279c1dd3..000000000
--- a/packaging/current_libbpf.checksums
+++ /dev/null
@@ -1 +0,0 @@
-05e4ccdd3bc8532290eebc37b37455b253071244d30e42412a7071d89221f1c8 v1.2.2p_netdata.tar.gz
diff --git a/packaging/current_libbpf.version b/packaging/current_libbpf.version
deleted file mode 100644
index b5dc2d8bd..000000000
--- a/packaging/current_libbpf.version
+++ /dev/null
@@ -1 +0,0 @@
-1.2.2p_netdata
diff --git a/packaging/dag/README.md b/packaging/dag/README.md
new file mode 100644
index 000000000..4b17c5447
--- /dev/null
+++ b/packaging/dag/README.md
@@ -0,0 +1,23 @@
+- Install Dagger CLI:
+ ```
+ cd /usr/local
+ curl -L https://dl.dagger.io/dagger/install.sh | sudo sh
+ ```
+- Install python requirements:
+ ```
+ pip install -r packaging/dag/requirements.txt
+ ```
+
+Now you can run something like this:
+
+```
+dagger run python packaging/dag/main.py build -p linux/x86_64 -d debian12
+```
+
+or
+
+```
+dagger run python packaging/dag/main.py test
+```
+
+
diff --git a/packaging/dag/build_command.py b/packaging/dag/build_command.py
new file mode 100644
index 000000000..dcb1b6c8a
--- /dev/null
+++ b/packaging/dag/build_command.py
@@ -0,0 +1,65 @@
+import click
+import asyncio
+import sys
+import dagger
+import pathlib
+import uuid
+
+from nd import (
+ Distribution,
+ NetdataInstaller,
+ FeatureFlags,
+ Endpoint,
+ AgentContext,
+ SUPPORTED_PLATFORMS,
+ SUPPORTED_DISTRIBUTIONS,
+)
+
+
+def run_async(func):
+ def wrapper(*args, **kwargs):
+ return asyncio.run(func(*args, **kwargs))
+
+ return wrapper
+
+
+@run_async
+async def simple_build(platform, distro):
+ config = dagger.Config(log_output=sys.stdout)
+
+ async with dagger.Connection(config) as client:
+ repo_root = pathlib.Path("/netdata")
+ prefix_path = pathlib.Path("/opt/netdata")
+
+ installer = NetdataInstaller(
+ platform, distro, repo_root, prefix_path, FeatureFlags.DBEngine
+ )
+
+ endpoint = Endpoint("node", 19999)
+ api_key = uuid.uuid4()
+ allow_children = False
+
+ agent_ctx = AgentContext(
+ client, platform, distro, installer, endpoint, api_key, allow_children
+ )
+
+ await agent_ctx.build_container()
+
+
+@click.command()
+@click.option(
+ "--platform",
+ "-p",
+ type=click.Choice(sorted([str(p) for p in SUPPORTED_PLATFORMS])),
+ help="Specify the platform.",
+)
+@click.option(
+ "--distribution",
+ "-d",
+ type=click.Choice(sorted([str(p) for p in SUPPORTED_DISTRIBUTIONS])),
+ help="Specify the distribution.",
+)
+def build(platform, distribution):
+ platform = dagger.Platform(platform)
+ distro = Distribution(distribution)
+ simple_build(platform, distro)
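Note: the `run_async` decorator above exists because click invokes command callbacks synchronously while dagger's SDK is asyncio-based. A minimal, self-contained sketch of the same pattern (the `do_work`/`hello` names are illustrative only, not part of the change):

```
import asyncio
import functools

import click


def run_async(func):
    # click calls commands synchronously; wrap the coroutine in asyncio.run().
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return asyncio.run(func(*args, **kwargs))

    return wrapper


@run_async
async def do_work(name: str) -> str:
    await asyncio.sleep(0)  # stand-in for an async dagger pipeline
    return f"hello, {name}"


@click.command()
@click.option("--name", default="world")
def hello(name):
    click.echo(do_work(name))


if __name__ == "__main__":
    hello()
```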
diff --git a/packaging/dag/files/child_stream.conf b/packaging/dag/files/child_stream.conf
new file mode 100644
index 000000000..ed78bd3fb
--- /dev/null
+++ b/packaging/dag/files/child_stream.conf
@@ -0,0 +1,10 @@
+[stream]
+ enabled = {{ enabled }}
+ destination = {{ destination }}
+ api key = {{ api_key }}
+ timeout seconds = {{ timeout_seconds }}
+ default port = {{ default_port }}
+ send charts matching = {{ send_charts_matching }}
+ buffer size bytes = {{ buffer_size_bytes }}
+ reconnect delay seconds = {{ reconnect_delay_seconds }}
+ initial clock resync iterations = {{ initial_clock_resync_iterations }}
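Note: the `{{ ... }}` placeholders in this template are Jinja2 variables, filled in by `ChildStreamConf.render()` in `packaging/dag/nd.py`. A minimal sketch of the same rendering step with illustrative values (the real substitutions come from the test harness):

```
import pathlib

import jinja2

tmpl_path = pathlib.Path("packaging/dag/files/child_stream.conf")
tmpl = jinja2.Template(tmpl_path.read_text())

print(tmpl.render(
    enabled="yes",
    destination="parent1:22000",
    api_key="00000000-0000-0000-0000-000000000000",
    timeout_seconds=60,
    default_port=19999,
    send_charts_matching="*",
    buffer_size_bytes=1024 * 1024,
    reconnect_delay_seconds=5,
    initial_clock_resync_iterations=60,
))
```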
diff --git a/packaging/dag/files/cmake-aarch64.sha256 b/packaging/dag/files/cmake-aarch64.sha256
new file mode 100644
index 000000000..122b26e99
--- /dev/null
+++ b/packaging/dag/files/cmake-aarch64.sha256
@@ -0,0 +1 @@
+a83e01ed1cdf44c2e33e0726513b9a35a8c09e3b5a126fd720b3c8a9d5552368 cmake-aarch64.sh
diff --git a/packaging/dag/files/cmake-x86_64.sha256 b/packaging/dag/files/cmake-x86_64.sha256
new file mode 100644
index 000000000..d5adc8aa1
--- /dev/null
+++ b/packaging/dag/files/cmake-x86_64.sha256
@@ -0,0 +1 @@
+8c449dabb2b2563ec4e6d5e0fb0ae09e729680efab71527b59015131cea4a042 cmake-x86_64.sh
diff --git a/packaging/dag/files/ol8-epel.repo b/packaging/dag/files/ol8-epel.repo
new file mode 100644
index 000000000..587cc3577
--- /dev/null
+++ b/packaging/dag/files/ol8-epel.repo
@@ -0,0 +1,6 @@
+[ol8_developer_EPEL]
+name=Oracle Linux $releasever EPEL Packages for Development ($basearch)
+baseurl=https://yum$ociregion.$ocidomain/repo/OracleLinux/OL8/developer/EPEL/$basearch/
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
+gpgcheck=1
+enabled=1
diff --git a/packaging/dag/files/ol9-epel.repo b/packaging/dag/files/ol9-epel.repo
new file mode 100644
index 000000000..c40007f1f
--- /dev/null
+++ b/packaging/dag/files/ol9-epel.repo
@@ -0,0 +1,6 @@
+[ol9_developer_EPEL]
+name=Oracle Linux $releasever EPEL Packages for Development ($basearch)
+baseurl=https://yum$ociregion.$ocidomain/repo/OracleLinux/OL9/developer/EPEL/$basearch/
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
+gpgcheck=1
+enabled=1
diff --git a/packaging/dag/files/parent_stream.conf b/packaging/dag/files/parent_stream.conf
new file mode 100644
index 000000000..15f303f97
--- /dev/null
+++ b/packaging/dag/files/parent_stream.conf
@@ -0,0 +1,7 @@
+[{{ api_key }}]
+ enabled = {{ enabled }}
+ allow from = {{ allow_from }}
+ default history = {{ default_history }}
+ health enabled by default = {{ health_enabled_by_default }}
+ default postpone alarms on connect seconds = {{ default_postpone_alarms_on_connect_seconds }}
+ multiple connections = {{ multiple_connections }}
diff --git a/packaging/dag/imageutils.py b/packaging/dag/imageutils.py
new file mode 100644
index 000000000..fd1e8ad26
--- /dev/null
+++ b/packaging/dag/imageutils.py
@@ -0,0 +1,1580 @@
+import os
+from pathlib import Path
+
+import dagger
+
+
+_ALPINE_COMMON_PACKAGES = [
+ "alpine-sdk",
+ "autoconf",
+ "automake",
+ "bash",
+ "binutils",
+ "bison",
+ "cmake",
+ "curl",
+ "curl-static",
+ "elfutils-dev",
+ "flex",
+ "gcc",
+ "git",
+ "gnutls-dev",
+ "gzip",
+ "jq",
+ "libelf-static",
+ "libmnl-dev",
+ "libmnl-static",
+ "libtool",
+ "libuv-dev",
+ "libuv-static",
+ "lz4-dev",
+ "lz4-static",
+ "make",
+ "mongo-c-driver-dev",
+ "mongo-c-driver-static",
+ "musl-fts-dev",
+ "ncurses",
+ "ncurses-static",
+ "netcat-openbsd",
+ "ninja",
+ "openssh",
+ "pcre2-dev",
+ "pkgconfig",
+ "protobuf-dev",
+ "snappy-dev",
+ "snappy-static",
+ "util-linux-dev",
+ "wget",
+ "xz",
+ "yaml-dev",
+ "yaml-static",
+ "zlib-dev",
+ "zlib-static",
+ "zstd-dev",
+ "zstd-static",
+]
+
+
+def build_alpine_3_18(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("alpine:3.18")
+
+ pkgs = [pkg for pkg in _ALPINE_COMMON_PACKAGES]
+
+ ctr = ctr.with_exec(["apk", "add", "--no-cache"] + pkgs)
+
+ return ctr
+
+
+def build_alpine_3_19(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("alpine:3.19")
+
+ pkgs = [pkg for pkg in _ALPINE_COMMON_PACKAGES]
+
+ ctr = ctr.with_exec(["apk", "add", "--no-cache"] + pkgs)
+
+ return ctr
+
+
+def static_build_openssl(
+ client: dagger.Client, ctr: dagger.Container
+) -> dagger.Container:
+ tree = (
+ client.git(url="https://github.com/openssl/openssl", keep_git_dir=True)
+ .tag("openssl-3.1.4")
+ .tree()
+ )
+
+ #
+ # TODO: verify 32-bit builds
+ #
+ ctr = (
+ ctr.with_directory("/openssl", tree)
+ .with_workdir("/openssl")
+ .with_env_variable("CFLAGS", "-fno-lto -pipe")
+ .with_env_variable("LDFLAGS", "-static")
+ .with_env_variable("PKG_CONFIG", "pkg-config --static")
+ .with_exec(
+ [
+ "sed",
+ "-i",
+ "s/disable('static', 'pic', 'threads');/disable('static', 'pic');/",
+ "Configure",
+ ]
+ )
+ .with_exec(
+ [
+ "./config",
+ "-static",
+ "threads",
+ "no-tests",
+ "--prefix=/openssl-static",
+ "--openssldir=/opt/netdata/etc/ssl",
+ ]
+ )
+ .with_exec(["make", "V=1", "-j", str(os.cpu_count())])
+ .with_exec(["make", "V=1", "-j", str(os.cpu_count()), "install_sw"])
+ .with_exec(["ln", "-s", "/openssl-static/lib", "/openssl-static/lib64"])
+ .with_exec(["perl", "configdata.pm", "--dump"])
+ )
+
+ return ctr
+
+
+def static_build_bash(client: dagger.Client, ctr: dagger.Container) -> dagger.Container:
+ tree = (
+ client.git(url="https://git.savannah.gnu.org/git/bash.git", keep_git_dir=True)
+ .tag("bash-5.1")
+ .tree()
+ )
+
+ ctr = (
+ ctr.with_directory("/bash", tree)
+ .with_workdir("/bash")
+ .with_env_variable("CFLAGS", "-pipe")
+ .with_env_variable("LDFLAGS", "")
+ .with_env_variable("PKG_CONFIG", "pkg-config --static")
+ .with_env_variable("PKG_CONFIG_PATH", "/openssl-static/lib64/pkgconfig")
+ .with_exec(
+ [
+ "./configure",
+ "--prefix",
+ "/opt/netdata",
+ "--without-bash-malloc",
+ "--enable-static-link",
+ "--enable-net-redirections",
+ "--enable-array-variables",
+ "--disable-progcomp",
+ "--disable-profiling",
+ "--disable-nls",
+ "--disable-dependency-tracking",
+ ]
+ )
+ .with_exec(
+ [
+ "echo",
+ "-e",
+ "all:\nclean:\ninstall:\n",
+ ">",
+ "examples/loadables/Makefile",
+ ]
+ )
+ .with_exec(["make", "clean"])
+ # see: https://gitweb.gentoo.org/repo/gentoo.git/tree/app-shells/bash/files/bash-5.1-parallel_make.patch?id=4c2ebbf4b8bc660beb98cc2d845c73375d6e4f50
+ .with_exec(["make", "V=1", "-j", "2", "install"])
+ .with_exec(["strip", "/opt/netdata/bin/bash"])
+ )
+
+ return ctr
+
+
+def static_build_curl(client: dagger.Client, ctr: dagger.Container) -> dagger.Container:
+ tree = (
+ client.git(url="https://github.com/curl/curl", keep_git_dir=True)
+ .tag("curl-8_4_0")
+ .tree()
+ )
+
+ ctr = (
+ ctr.with_directory("/curl", tree)
+ .with_workdir("/curl")
+ .with_env_variable("CFLAGS", "-I/openssl-static/include -pipe")
+ .with_env_variable("LDFLAGS", "-static -L/openssl-static/lib64")
+ .with_env_variable("PKG_CONFIG", "pkg-config --static")
+ .with_env_variable("PKG_CONFIG_PATH", "/openssl-static/lib64/pkgconfig")
+ .with_exec(["autoreconf", "-ifv"])
+ .with_exec(
+ [
+ "./configure",
+ "--prefix=/curl-static",
+ "--enable-optimize",
+ "--disable-shared",
+ "--enable-static",
+ "--enable-http",
+ "--disable-ldap",
+ "--disable-ldaps",
+ "--enable-proxy",
+ "--disable-dict",
+ "--disable-telnet",
+ "--disable-tftp",
+ "--disable-pop3",
+ "--disable-imap",
+ "--disable-smb",
+ "--disable-smtp",
+ "--disable-gopher",
+ "--enable-ipv6",
+ "--enable-cookies",
+ "--with-ca-fallback",
+ "--with-openssl",
+ "--disable-dependency-tracking",
+ ]
+ )
+ .with_exec(
+ ["sed", "-i", "-e", "s/LDFLAGS =/LDFLAGS = -all-static/", "src/Makefile"]
+ )
+ .with_exec(["make", "clean"])
+ .with_exec(["make", "V=1", "-j", str(os.cpu_count()), "install"])
+ .with_exec(["cp", "/curl-static/bin/curl", "/opt/netdata/bin/curl"])
+ .with_exec(["strip", "/opt/netdata/bin/curl"])
+ )
+
+ return ctr
+
+
+def static_build_ioping(
+ client: dagger.Client, ctr: dagger.Container
+) -> dagger.Container:
+ tree = (
+ client.git(url="https://github.com/koct9i/ioping", keep_git_dir=True)
+ .tag("v1.3")
+ .tree()
+ )
+
+ ctr = (
+ ctr.with_directory("/ioping", tree)
+ .with_workdir("/ioping")
+ .with_env_variable("CFLAGS", "-static -pipe")
+ .with_exec(["mkdir", "-p", "/opt/netdata/usr/libexec/netdata/plugins.d"])
+ .with_exec(["make", "V=1"])
+ .with_exec(
+ [
+ "install",
+ "-o",
+ "root",
+ "-g",
+ "root",
+ "-m",
+ "4750",
+ "ioping",
+ "/opt/netdata/usr/libexec/netdata/plugins.d",
+ ]
+ )
+ .with_exec(["strip", "/opt/netdata/usr/libexec/netdata/plugins.d/ioping"])
+ )
+
+ return ctr
+
+
+def static_build_libnetfilter_acct(
+ client: dagger.Client, ctr: dagger.Container
+) -> dagger.Container:
+ tree = (
+ client.git(url="git://git.netfilter.org/libnetfilter_acct", keep_git_dir=True)
+ .tag("libnetfilter_acct-1.0.3")
+ .tree()
+ )
+
+ ctr = (
+ ctr.with_directory("/libnetfilter_acct", tree)
+ .with_workdir("/libnetfilter_acct")
+ .with_env_variable("CFLAGS", "-static -I/usr/include/libmnl -pipe")
+ .with_env_variable("LDFLAGS", "-static -L/usr/lib -lmnl")
+ .with_env_variable("PKG_CONFIG", "pkg-config --static")
+ .with_env_variable("PKG_CONFIG_PATH", "/usr/lib/pkgconfig")
+ .with_exec(["autoreconf", "-ifv"])
+ .with_exec(
+ [
+ "./configure",
+ "--prefix=/libnetfilter_acct-static",
+ "--exec-prefix=/libnetfilter_acct-static",
+ ]
+ )
+ .with_exec(["make", "clean"])
+ .with_exec(["make", "V=1", "-j", str(os.cpu_count()), "install"])
+ )
+
+ return ctr
+
+
+def static_build_netdata(
+ client: dagger.Client, ctr: dagger.Container
+) -> dagger.Container:
+ CFLAGS = [
+ "-ffunction-sections",
+ "-fdata-sections",
+ "-static",
+ "-O2",
+ "-funroll-loops",
+ "-I/openssl-static/include",
+ "-I/libnetfilter_acct-static/include/libnetfilter_acct",
+ "-I/curl-local/include/curl",
+ "-I/usr/include/libmnl",
+ "-pipe",
+ ]
+
+ LDFLAGS = [
+ "-Wl,--gc-sections",
+ "-static",
+ "-L/openssl-static/lib64",
+ "-L/libnetfilter_acct-static/lib",
+ "-lnetfilter_acct",
+ "-L/usr/lib",
+ "-lmnl",
+ "-L/usr/lib",
+ "-lzstd",
+ "-L/curl-local/lib",
+ ]
+
+ PKG_CONFIG = [
+ "pkg-config",
+ "--static",
+ ]
+
+ PKG_CONFIG_PATH = [
+ "/openssl-static/lib64/pkgconfig",
+ "/libnetfilter_acct-static/lib/pkgconfig",
+ "/usr/lib/pkgconfig",
+ "/curl-local/lib/pkgconfig",
+ ]
+
+ CMAKE_FLAGS = [
+ "-DOPENSSL_ROOT_DIR=/openssl-static",
+ "-DOPENSSL_LIBRARIES=/openssl-static/lib64",
+ "-DCMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE=/openssl-static",
+ "-DLWS_OPENSSL_INCLUDE_DIRS=/openssl-static/include",
+ "-DLWS_OPENSSL_LIBRARIES=/openssl-static/lib64/libssl.a;/openssl-static/lib64/libcrypto.a",
+ ]
+
+ NETDATA_INSTALLER_CMD = [
+ "./netdata-installer.sh",
+ "--install-prefix",
+ "/opt",
+ "--dont-wait",
+ "--dont-start-it",
+ "--disable-exporting-mongodb",
+ "--require-cloud",
+ "--use-system-protobuf",
+ "--dont-scrub-cflags-even-though-it-may-break-things",
+ "--one-time-build",
+ "--enable-lto",
+ ]
+
+ ctr = (
+ ctr.with_workdir("/netdata")
+ .with_env_variable("NETDATA_CMAKE_OPTIONS", "-DCMAKE_BUILD_TYPE=Debug")
+ .with_env_variable("CFLAGS", " ".join(CFLAGS))
+ .with_env_variable("LDFLAGS", " ".join(LDFLAGS))
+ .with_env_variable("PKG_CONFIG", " ".join(PKG_CONFIG))
+ .with_env_variable("PKG_CONFIG_PATH", ":".join(PKG_CONFIG_PATH))
+ .with_env_variable("CMAKE_FLAGS", " ".join(CMAKE_FLAGS))
+ .with_env_variable("EBPF_LIBC", "static")
+ .with_env_variable("IS_NETDATA_STATIC_BINARY", "yes")
+ .with_exec(NETDATA_INSTALLER_CMD)
+ )
+
+ return ctr
+
+
+def static_build(client, repo_path):
+ cmake_build_release_path = os.path.join(repo_path, "cmake-build-release")
+
+ ctr = build_alpine_3_18(client, dagger.Platform("linux/x86_64"))
+ ctr = static_build_openssl(client, ctr)
+ ctr = static_build_bash(client, ctr)
+ ctr = static_build_curl(client, ctr)
+ ctr = static_build_ioping(client, ctr)
+ ctr = static_build_libnetfilter_acct(client, ctr)
+
+ ctr = ctr.with_directory(
+ "/netdata",
+ client.host().directory(repo_path),
+ exclude=[
+ f"{cmake_build_release_path}/*",
+ "fluent-bit/build",
+ ],
+ )
+
+ # TODO: link bin/sbin
+
+ ctr = static_build_netdata(client, ctr)
+
+ build_dir = ctr.directory("/opt/netdata")
+ artifact_dir = os.path.join(Path.home(), "ci/netdata-static")
+ output_task = build_dir.export(artifact_dir)
+ return output_task
+
+
+_CENTOS_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "autogen",
+ "automake",
+ "bison",
+ "bison-devel",
+ "cmake",
+ "cups-devel",
+ "curl",
+ "diffutils",
+ "elfutils-libelf-devel",
+ "findutils",
+ "flex",
+ "flex-devel",
+ "freeipmi-devel",
+ "gcc",
+ "gcc-c++",
+ "git-core",
+ "golang",
+ "json-c-devel",
+ "libyaml-devel",
+ "libatomic",
+ "libcurl-devel",
+ "libmnl-devel",
+ "libnetfilter_acct-devel",
+ "libtool",
+ "libuuid-devel",
+ "libuv-devel",
+ "libzstd-devel",
+ "lm_sensors",
+ "lz4-devel",
+ "make",
+ "ninja-build",
+ "openssl-devel",
+ "openssl-perl",
+ "patch",
+ "pcre2-devel",
+ "pkgconfig",
+ "pkgconfig(libmongoc-1.0)",
+ "procps",
+ "protobuf-c-devel",
+ "protobuf-compiler",
+ "protobuf-devel",
+ "rpm-build",
+ "rpm-devel",
+ "rpmdevtools",
+ "snappy-devel",
+ "systemd-devel",
+ "wget",
+ "zlib-devel",
+]
+
+
+def build_amazon_linux_2(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("amazonlinux:2")
+
+ pkgs = [pkg for pkg in _CENTOS_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["yum", "update", "-y"])
+ .with_exec(["yum", "install", "-y"] + pkgs)
+ .with_exec(["yum", "clean", "all"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ if platform == "linux/x86_64":
+ machine = "x86_64"
+ elif platform == "linux/arm64":
+ machine = "aarch64"
+ else:
+ raise Exception(
+            "Amazon Linux 2 supports only linux/x86_64 and linux/arm64 platforms."
+ )
+
+
+ checksum_path = Path(__file__).parent / f"files/cmake-{machine}.sha256"
+
+ ctr = (
+ ctr.with_file(
+ f"cmake-{machine}.sha256",
+ client.host().file(checksum_path.as_posix()),
+ )
+ .with_exec(
+ [
+ "curl",
+ "--fail",
+ "-sSL",
+ "--connect-timeout",
+ "20",
+ "--retry",
+ "3",
+ "--output",
+ f"cmake-{machine}.sh",
+ f"https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-{machine}.sh",
+ ]
+ )
+ .with_exec(["sha256sum", "-c", f"cmake-{machine}.sha256"])
+ .with_exec(["chmod", "u+x", f"./cmake-{machine}.sh"])
+ .with_exec([f"./cmake-{machine}.sh", "--skip-license", "--prefix=/usr/local"])
+ )
+
+ return ctr
+
+
+def build_centos_7(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("centos:7")
+
+ pkgs = [pkg for pkg in _CENTOS_COMMON_PACKAGES] + ["bash"]
+
+ ctr = (
+ ctr.with_exec(["yum", "install", "-y", "epel-release"])
+ .with_exec(["yum", "update", "-y"])
+ .with_exec(["yum", "install", "-y"] + pkgs)
+ .with_exec(["yum", "clean", "all"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ if platform == "linux/x86_64":
+ machine = "x86_64"
+ elif platform == "linux/arm64":
+ machine = "aarch64"
+ else:
+        raise Exception("CentOS 7 supports only linux/x86_64 and linux/arm64 platforms.")
+
+ checksum_path = Path(__file__).parent / f"files/cmake-{machine}.sha256"
+
+ ctr = (
+ ctr.with_file(
+ f"cmake-{machine}.sha256",
+ client.host().file(checksum_path.as_posix()),
+ )
+ .with_exec(
+ [
+ "curl",
+ "--fail",
+ "-sSL",
+ "--connect-timeout",
+ "20",
+ "--retry",
+ "3",
+ "--output",
+ f"cmake-{machine}.sh",
+ f"https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-{machine}.sh",
+ ]
+ )
+ .with_exec(["sha256sum", "-c", f"cmake-{machine}.sha256"])
+ .with_exec(["chmod", "u+x", f"./cmake-{machine}.sh"])
+ .with_exec([f"./cmake-{machine}.sh", "--skip-license", "--prefix=/usr/local"])
+ )
+
+ return ctr
+
+
+_ROCKY_LINUX_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "automake",
+ "bash",
+ "bison",
+ "cmake",
+ "cups-devel",
+ "curl",
+ "libcurl-devel",
+ "diffutils",
+ "elfutils-libelf-devel",
+ "findutils",
+ "flex",
+ "freeipmi-devel",
+ "gcc",
+ "gcc-c++",
+ "git",
+ "golang",
+ "json-c-devel",
+ "libatomic",
+ "libmnl-devel",
+ "libtool",
+ "libuuid-devel",
+ "libuv-devel",
+ "libyaml-devel",
+ "libzstd-devel",
+ "lm_sensors",
+ "lz4-devel",
+ "make",
+ "ninja-build",
+ "nc",
+ "openssl-devel",
+ "openssl-perl",
+ "patch",
+ "pcre2-devel",
+ "pkgconfig",
+ "pkgconfig(libmongoc-1.0)",
+ "procps",
+ "protobuf-c-devel",
+ "protobuf-compiler",
+ "protobuf-devel",
+ "python3",
+ "python3-pyyaml",
+ "rpm-build",
+ "rpm-devel",
+ "rpmdevtools",
+ "snappy-devel",
+ "systemd-devel",
+ "wget",
+ "zlib-devel",
+]
+
+
+def build_rocky_linux_8(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("rockylinux:8")
+
+ pkgs = [pkg for pkg in _ROCKY_LINUX_COMMON_PACKAGES] + ["autogen"]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "dnf-command(config-manager)",
+ "epel-release",
+ ]
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "powertools"])
+ .with_exec(["dnf", "clean", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_rocky_linux_9(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("rockylinux:9")
+
+ pkgs = [pkg for pkg in _ROCKY_LINUX_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "dnf-command(config-manager)",
+ "epel-release",
+ ]
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "crb"])
+ .with_exec(["dnf", "clean", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--allowerasing",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+_CENTOS_STREAM_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "automake",
+ "bash",
+ "bison",
+ "cmake",
+ "cups-devel",
+ "curl",
+ "libcurl-devel",
+ "libyaml-devel",
+ "diffutils",
+ "elfutils-libelf-devel",
+ "findutils",
+ "flex",
+ "freeipmi-devel",
+ "gcc",
+ "gcc-c++",
+ "git",
+ "golang",
+ "json-c-devel",
+ "libatomic",
+ "libmnl-devel",
+ "libtool",
+ "libuuid-devel",
+ "libuv-devel",
+ # "libzstd-devel",
+ "lm_sensors",
+ "lz4-devel",
+ "make",
+ "ninja-build",
+ "nc",
+ "openssl-devel",
+ "openssl-perl",
+ "patch",
+ "pcre2-devel",
+ "pkgconfig",
+ "pkgconfig(libmongoc-1.0)",
+ "procps",
+ "protobuf-c-devel",
+ "protobuf-compiler",
+ "protobuf-devel",
+ "python3",
+ "python3-pyyaml",
+ "rpm-build",
+ "rpm-devel",
+ "rpmdevtools",
+ "snappy-devel",
+ "systemd-devel",
+ "wget",
+ "zlib-devel",
+]
+
+
+def build_centos_stream_8(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("quay.io/centos/centos:stream8")
+
+ pkgs = [pkg for pkg in _CENTOS_STREAM_COMMON_PACKAGES] + ["autogen"]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "dnf-command(config-manager)",
+ "epel-release",
+ ]
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "powertools"])
+ .with_exec(["dnf", "clean", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_centos_stream_9(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("quay.io/centos/centos:stream9")
+
+ pkgs = [pkg for pkg in _CENTOS_STREAM_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "dnf-command(config-manager)",
+ "epel-release",
+ ]
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "crb"])
+ .with_exec(["dnf", "clean", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--allowerasing",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+_ORACLE_LINUX_COMMON_PACKAGES = list(_ROCKY_LINUX_COMMON_PACKAGES)
+
+
+def build_oracle_linux_9(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("oraclelinux:9")
+
+ pkgs = [pkg for pkg in _ORACLE_LINUX_COMMON_PACKAGES]
+
+ repo_path = str(Path(__file__).parent.parent.parent)
+ this_path = os.path.join(repo_path, "packaging/dag")
+
+ ctr = (
+ ctr.with_file(
+ "/etc/yum.repos.d/ol9-epel.repo",
+ client.host().file(f"{this_path}/ol9-epel.repo"),
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "ol9_codeready_builder"])
+ .with_exec(["dnf", "config-manager", "--set-enabled", "ol9_developer_EPEL"])
+ .with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(["dnf", "clean", "-y", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_oracle_linux_8(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("oraclelinux:8")
+
+ pkgs = [pkg for pkg in _ORACLE_LINUX_COMMON_PACKAGES] + ["autogen"]
+
+ repo_path = str(Path(__file__).parent.parent.parent)
+ this_path = os.path.join(repo_path, "packaging/dag")
+
+ ctr = (
+ ctr.with_file(
+ "/etc/yum.repos.d/ol8-epel.repo",
+ client.host().file(f"{this_path}/ol8-epel.repo"),
+ )
+ .with_exec(["dnf", "config-manager", "--set-enabled", "ol8_codeready_builder"])
+ .with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(["dnf", "clean", "-y", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+_OPENSUSE_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "autogen",
+ "automake",
+ "bison",
+ "cmake",
+ "cups",
+ "cups-devel",
+ "curl",
+ "diffutils",
+ "flex",
+ "freeipmi-devel",
+ "gcc",
+ "gcc-c++",
+ "git-core",
+ "go",
+ "json-glib-devel",
+ "judy-devel",
+ "libatomic1",
+ "libcurl-devel",
+ "libelf-devel",
+ "liblz4-devel",
+ "libjson-c-devel",
+ "libyaml-devel",
+ "libmnl0",
+ "libmnl-devel",
+ "libnetfilter_acct1",
+ "libnetfilter_acct-devel",
+ "libpcre2-8-0",
+ "libopenssl-devel",
+ "libtool",
+ "libuv-devel",
+ "libuuid-devel",
+ "libzstd-devel",
+ "make",
+ "ninja",
+ "patch",
+ "pkg-config",
+ "protobuf-devel",
+ "rpm-build",
+ "rpm-devel",
+ "rpmdevtools",
+ "snappy-devel",
+ "systemd-devel",
+ "tar",
+ "wget",
+ "xen-devel",
+]
+
+
+def build_opensuse_tumbleweed(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("opensuse/tumbleweed:latest")
+
+ pkgs = [pkg for pkg in _OPENSUSE_COMMON_PACKAGES] + ["protobuf-c"]
+
+ ctr = (
+ ctr.with_exec(["zypper", "update", "-y"])
+ .with_exec(
+ [
+ "zypper",
+ "install",
+ "-y",
+ "--allow-downgrade",
+ ]
+ + pkgs
+ )
+ .with_exec(["zypper", "clean"])
+ .with_exec(["rm", "-rf", "/var/cache/zypp/*/*"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/usr/src/packages/BUILD",
+ "/usr/src/packages/RPMS",
+ "/usr/src/packages/SOURCES",
+ "/usr/src/packages/SPECS",
+ "/usr/src/packages/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_opensuse_15_5(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("opensuse/leap:15.5")
+
+ pkgs = [pkg for pkg in _OPENSUSE_COMMON_PACKAGES] + ["libprotobuf-c-devel"]
+
+ ctr = (
+ ctr.with_exec(["zypper", "update", "-y"])
+ .with_exec(
+ [
+ "zypper",
+ "install",
+ "-y",
+ "--allow-downgrade",
+ ]
+ + pkgs
+ )
+ .with_exec(["zypper", "clean"])
+ .with_exec(["rm", "-rf", "/var/cache/zypp/*/*"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/usr/src/packages/BUILD",
+ "/usr/src/packages/RPMS",
+ "/usr/src/packages/SOURCES",
+ "/usr/src/packages/SPECS",
+ "/usr/src/packages/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_opensuse_15_4(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ crt = client.container(platform=platform).from_("opensuse/leap:15.4")
+
+ pkgs = [pkg for pkg in _OPENSUSE_COMMON_PACKAGES] + ["libprotobuf-c-devel"]
+
+ crt = (
+ crt.with_exec(["zypper", "update", "-y"])
+ .with_exec(
+ [
+ "zypper",
+ "install",
+ "-y",
+ "--allow-downgrade",
+ ]
+ + pkgs
+ )
+ .with_exec(["zypper", "clean"])
+ .with_exec(["rm", "-rf", "/var/cache/zypp/*/*"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/usr/src/packages/BUILD",
+ "/usr/src/packages/RPMS",
+ "/usr/src/packages/SOURCES",
+ "/usr/src/packages/SPECS",
+ "/usr/src/packages/SRPMS",
+ ]
+ )
+ )
+
+ return crt
+
+
+_FEDORA_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "autogen",
+ "automake",
+ "bash",
+ "bison",
+ "cmake",
+ "cups-devel",
+ "curl",
+ "diffutils",
+ "elfutils-libelf-devel",
+ "findutils",
+ "flex",
+ "freeipmi-devel",
+ "gcc",
+ "gcc-c++",
+ "git-core",
+ "golang",
+ "json-c-devel",
+ "libcurl-devel",
+ "libyaml-devel",
+ "Judy-devel",
+ "libatomic",
+ "libmnl-devel",
+ "libnetfilter_acct-devel",
+ "libtool",
+ "libuuid-devel",
+ "libuv-devel",
+ "libzstd-devel",
+ "lz4-devel",
+ "make",
+ "ninja-build",
+ "openssl-devel",
+ "openssl-perl",
+ "patch",
+ "pcre2-devel",
+ "pkgconfig",
+]
+
+
+def build_fedora_37(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("fedora:37")
+
+ pkgs = [pkg for pkg in _FEDORA_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(["dnf", "clean", "-y", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_fedora_38(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("fedora:38")
+
+ pkgs = [pkg for pkg in _FEDORA_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(["dnf", "clean", "-y", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+def build_fedora_39(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("fedora:39")
+
+ pkgs = [pkg for pkg in _FEDORA_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_exec(["dnf", "distro-sync", "-y", "--nodocs"])
+ .with_exec(["dnf", "clean", "-y", "packages"])
+ .with_exec(
+ [
+ "dnf",
+ "install",
+ "-y",
+ "--nodocs",
+ "--setopt=install_weak_deps=False",
+ "--setopt=diskspacecheck=False",
+ ]
+ + pkgs
+ )
+ .with_exec(["rm", "-rf", "/var/cache/dnf"])
+ .with_exec(["c_rehash"])
+ .with_exec(
+ [
+ "mkdir",
+ "-p",
+ "/root/rpmbuild/BUILD",
+ "/root/rpmbuild/RPMS",
+ "/root/rpmbuild/SOURCES",
+ "/root/rpmbuild/SPECS",
+ "/root/rpmbuild/SRPMS",
+ ]
+ )
+ )
+
+ return ctr
+
+
+_DEBIAN_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "autogen",
+ "automake",
+ "bison",
+ "build-essential",
+ "ca-certificates",
+ "cmake",
+ "curl",
+ "dh-autoreconf",
+ "dh-make",
+ "dpkg-dev",
+ "flex",
+ "g++",
+ "gcc",
+ "git-buildpackage",
+ "git-core",
+ "golang",
+ "libatomic1",
+ "libcurl4-openssl-dev",
+ "libcups2-dev",
+ "libdistro-info-perl",
+ "libelf-dev",
+ "libipmimonitoring-dev",
+ "libjson-c-dev",
+ "libyaml-dev",
+ "libjudy-dev",
+ "liblz4-dev",
+ "libmnl-dev",
+ "libmongoc-dev",
+ "libnetfilter-acct-dev",
+ "libpcre2-dev",
+ "libprotobuf-dev",
+ "libprotoc-dev",
+ "libsnappy-dev",
+ "libsystemd-dev",
+ "libssl-dev",
+ "libtool",
+ "libuv1-dev",
+ "libzstd-dev",
+ "make",
+ "ninja-build",
+ "pkg-config",
+ "protobuf-compiler",
+ "systemd",
+ "uuid-dev",
+ "wget",
+ "zlib1g-dev",
+]
+
+
+def build_debian_10(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("debian:buster")
+
+ pkgs = [pkg for pkg in _DEBIAN_COMMON_PACKAGES] + ["dh-systemd", "libxen-dev"]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+def build_debian_11(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("debian:bullseye")
+
+ pkgs = [pkg for pkg in _DEBIAN_COMMON_PACKAGES] + ["libxen-dev"]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+def build_debian_12(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("debian:bookworm")
+
+ pkgs = [pkg for pkg in _DEBIAN_COMMON_PACKAGES]
+
+ if platform != dagger.Platform("linux/i386"):
+ pkgs.append("libxen-dev")
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+_UBUNTU_COMMON_PACKAGES = [
+ "autoconf",
+ "autoconf-archive",
+ "autogen",
+ "automake",
+ "bison",
+ "build-essential",
+ "ca-certificates",
+ "cmake",
+ "curl",
+ "dh-autoreconf",
+ "dh-make",
+ "dpkg-dev",
+ "flex",
+ "g++",
+ "gcc",
+ "git-buildpackage",
+ "git-core",
+ "golang",
+ "libatomic1",
+ "libcurl4-openssl-dev",
+ "libcups2-dev",
+ "libdistro-info-perl",
+ "libelf-dev",
+ "libipmimonitoring-dev",
+ "libjson-c-dev",
+ "libyaml-dev",
+ "libjudy-dev",
+ "liblz4-dev",
+ "libmnl-dev",
+ "libmongoc-dev",
+ "libnetfilter-acct-dev",
+ "libpcre2-dev",
+ "libprotobuf-dev",
+ "libprotoc-dev",
+ "libsnappy-dev",
+ "libsystemd-dev",
+ "libssl-dev",
+ "libtool",
+ "libuv1-dev",
+ "libxen-dev",
+ "libzstd-dev",
+ "make",
+ "ninja-build",
+ "pkg-config",
+ "protobuf-compiler",
+ "systemd",
+ "uuid-dev",
+ "wget",
+ "zlib1g-dev",
+]
+
+
+def build_ubuntu_20_04(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("ubuntu:20.04")
+
+ pkgs = [pkg for pkg in _UBUNTU_COMMON_PACKAGES] + ["dh-systemd"]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ #
+ # FIXME: add kitware for cmake on arm-hf
+ #
+
+ return ctr
+
+
+def build_ubuntu_22_04(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("ubuntu:22.04")
+
+ pkgs = [pkg for pkg in _UBUNTU_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+def build_ubuntu_23_04(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("ubuntu:23.04")
+
+ pkgs = [pkg for pkg in _UBUNTU_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+def build_ubuntu_23_10(
+ client: dagger.Client, platform: dagger.Platform
+) -> dagger.Container:
+ ctr = client.container(platform=platform).from_("ubuntu:23.10")
+
+ pkgs = [pkg for pkg in _UBUNTU_COMMON_PACKAGES]
+
+ ctr = (
+ ctr.with_env_variable("DEBIAN_FRONTEND", "noninteractive")
+ .with_exec(["apt-get", "update"])
+ .with_exec(["apt-get", "upgrade", "-y"])
+ .with_exec(["apt-get", "install", "-y", "--no-install-recommends"] + pkgs)
+ .with_exec(["apt-get", "clean"])
+ .with_exec(["c_rehash"])
+ .with_exec(["rm", "-rf", "/var/lib/apt/lists/*"])
+ )
+
+ return ctr
+
+
+def install_cargo(ctr: dagger.Container) -> dagger.Container:
+ bin_paths = [
+ "/root/.cargo/bin",
+ "/usr/local/sbin",
+ "/usr/local/bin",
+ "/usr/sbin",
+ "/usr/bin",
+ "/sbin",
+ "/bin",
+ ]
+
+ ctr = (
+ ctr.with_workdir("/")
+ .with_exec(["sh", "-c", "curl https://sh.rustup.rs -sSf | sh -s -- -y"])
+ .with_env_variable("PATH", ":".join(bin_paths))
+ .with_exec(["cargo", "new", "--bin", "hello"])
+ .with_workdir("/hello")
+ .with_exec(["cargo", "run", "-v", "-v"])
+ )
+
+ return ctr
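Note: `static_build()` above chains the per-dependency builders and returns the export awaitable for the `/opt/netdata` tree. A hypothetical driver (not part of this change; the repo path is a placeholder, and it assumes it runs from `packaging/dag` so `imageutils` is importable) would look roughly like this:

```
import asyncio
import sys

import dagger

import imageutils


async def main(repo_path: str):
    config = dagger.Config(log_output=sys.stdout)
    async with dagger.Connection(config) as client:
        # static_build() returns the export awaitable for ~/ci/netdata-static
        await imageutils.static_build(client, repo_path)


if __name__ == "__main__":
    asyncio.run(main("/path/to/netdata"))
```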
diff --git a/packaging/dag/main.py b/packaging/dag/main.py
new file mode 100755
index 000000000..c7e9670cf
--- /dev/null
+++ b/packaging/dag/main.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+import click
+
+from test_command import test
+from build_command import build
+
+
+@click.group()
+def cli():
+ pass
+
+
+cli.add_command(test)
+cli.add_command(build)
+
+if __name__ == "__main__":
+ cli()
diff --git a/packaging/dag/nd.py b/packaging/dag/nd.py
new file mode 100644
index 000000000..c5dda2c8d
--- /dev/null
+++ b/packaging/dag/nd.py
@@ -0,0 +1,406 @@
+from typing import List
+
+import enum
+import os
+import pathlib
+import uuid
+
+import dagger
+import jinja2
+
+import imageutils
+
+
+class Platform:
+ def __init__(self, platform: str):
+ self.platform = dagger.Platform(platform)
+
+ def escaped(self) -> str:
+ return str(self.platform).removeprefix("linux/").replace("/", "_")
+
+ def __eq__(self, other):
+ if isinstance(other, Platform):
+ return self.platform == other.platform
+ elif isinstance(other, dagger.Platform):
+ return self.platform == other
+ else:
+ return NotImplemented
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ return hash(self.platform)
+
+ def __str__(self) -> str:
+ return str(self.platform)
+
+
+SUPPORTED_PLATFORMS = set(
+ [
+ Platform("linux/x86_64"),
+ Platform("linux/arm64"),
+ Platform("linux/i386"),
+ Platform("linux/arm/v7"),
+ Platform("linux/arm/v6"),
+ Platform("linux/ppc64le"),
+ Platform("linux/s390x"),
+ Platform("linux/riscv64"),
+ ]
+)
+
+
+SUPPORTED_DISTRIBUTIONS = set(
+ [
+ "alpine_3_18",
+ "alpine_3_19",
+ "amazonlinux2",
+ "centos7",
+ "centos-stream8",
+ "centos-stream9",
+ "debian10",
+ "debian11",
+ "debian12",
+ "fedora37",
+ "fedora38",
+ "fedora39",
+ "opensuse15.4",
+ "opensuse15.5",
+ "opensusetumbleweed",
+ "oraclelinux8",
+ "oraclelinux9",
+ "rockylinux8",
+ "rockylinux9",
+ "ubuntu20.04",
+ "ubuntu22.04",
+ "ubuntu23.04",
+ "ubuntu23.10",
+ ]
+)
+
+
+class Distribution:
+ def __init__(self, display_name):
+ self.display_name = display_name
+
+ if self.display_name == "alpine_3_18":
+ self.docker_tag = "alpine:3.18"
+ self.builder = imageutils.build_alpine_3_18
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "alpine_3_19":
+ self.docker_tag = "alpine:3.19"
+ self.builder = imageutils.build_alpine_3_19
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "amazonlinux2":
+ self.docker_tag = "amazonlinux:2"
+ self.builder = imageutils.build_amazon_linux_2
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "centos7":
+ self.docker_tag = "centos:7"
+ self.builder = imageutils.build_centos_7
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "centos-stream8":
+ self.docker_tag = "quay.io/centos/centos:stream8"
+ self.builder = imageutils.build_centos_stream_8
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "centos-stream9":
+ self.docker_tag = "quay.io/centos/centos:stream9"
+ self.builder = imageutils.build_centos_stream_9
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "debian10":
+ self.docker_tag = "debian:10"
+ self.builder = imageutils.build_debian_10
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "debian11":
+ self.docker_tag = "debian:11"
+ self.builder = imageutils.build_debian_11
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "debian12":
+ self.docker_tag = "debian:12"
+ self.builder = imageutils.build_debian_12
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "fedora37":
+ self.docker_tag = "fedora:37"
+ self.builder = imageutils.build_fedora_37
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "fedora38":
+ self.docker_tag = "fedora:38"
+ self.builder = imageutils.build_fedora_38
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "fedora39":
+ self.docker_tag = "fedora:39"
+ self.platforms = SUPPORTED_PLATFORMS
+ self.builder = imageutils.build_fedora_39
+ elif self.display_name == "opensuse15.4":
+ self.docker_tag = "opensuse/leap:15.4"
+ self.builder = imageutils.build_opensuse_15_4
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "opensuse15.5":
+ self.docker_tag = "opensuse/leap:15.5"
+ self.builder = imageutils.build_opensuse_15_5
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "opensusetumbleweed":
+ self.docker_tag = "opensuse/tumbleweed:latest"
+ self.builder = imageutils.build_opensuse_tumbleweed
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "oraclelinux8":
+ self.docker_tag = "oraclelinux:8"
+ self.builder = imageutils.build_oracle_linux_8
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "oraclelinux9":
+ self.docker_tag = "oraclelinux:9"
+ self.builder = imageutils.build_oracle_linux_9
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "rockylinux8":
+ self.docker_tag = "rockylinux:8"
+ self.builder = imageutils.build_rocky_linux_8
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "rockylinux9":
+ self.docker_tag = "rockylinux:9"
+ self.builder = imageutils.build_rocky_linux_9
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "ubuntu20.04":
+ self.docker_tag = "ubuntu:20.04"
+ self.builder = imageutils.build_ubuntu_20_04
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "ubuntu22.04":
+ self.docker_tag = "ubuntu:22.04"
+ self.builder = imageutils.build_ubuntu_22_04
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "ubuntu23.04":
+ self.docker_tag = "ubuntu:23.04"
+ self.builder = imageutils.build_ubuntu_23_04
+ self.platforms = SUPPORTED_PLATFORMS
+ elif self.display_name == "ubuntu23.10":
+ self.docker_tag = "ubuntu:23.10"
+ self.builder = imageutils.build_ubuntu_23_10
+ self.platforms = SUPPORTED_PLATFORMS
+ else:
+ raise ValueError(f"Unknown distribution: {self.display_name}")
+
+ def _cache_volume(
+ self, client: dagger.Client, platform: dagger.Platform, path: str
+ ) -> dagger.CacheVolume:
+ tag = "_".join([self.display_name, Platform(platform).escaped()])
+ return client.cache_volume(f"{path}-{tag}")
+
+ def build(
+ self, client: dagger.Client, platform: dagger.Platform
+ ) -> dagger.Container:
+ if platform not in self.platforms:
+ raise ValueError(
+ f"Building {self.display_name} is not supported on {platform}."
+ )
+
+ ctr = self.builder(client, platform)
+ ctr = imageutils.install_cargo(ctr)
+
+ return ctr
+
+
+class FeatureFlags(enum.Flag):
+ DBEngine = enum.auto()
+ GoPlugin = enum.auto()
+ ExtendedBPF = enum.auto()
+ LogsManagement = enum.auto()
+ MachineLearning = enum.auto()
+ BundledProtobuf = enum.auto()
+
+
+class NetdataInstaller:
+ def __init__(
+ self,
+ platform: Platform,
+ distro: Distribution,
+ repo_root: pathlib.Path,
+ prefix: pathlib.Path,
+ features: FeatureFlags,
+ ):
+ self.platform = platform
+ self.distro = distro
+ self.repo_root = repo_root
+ self.prefix = prefix
+ self.features = features
+
+ def _mount_repo(
+ self, client: dagger.Client, ctr: dagger.Container, repo_root: pathlib.Path
+ ) -> dagger.Container:
+ host_repo_root = pathlib.Path(__file__).parent.parent.parent.as_posix()
+ exclude_dirs = ["build", "fluent-bit/build", "packaging/dag"]
+
+        # The installer builds/stores intermediate artifacts under externaldeps/.
+ # We add a volume to speed up rebuilds. The volume has to be unique
+ # per platform/distro in order to avoid mixing unrelated artifacts
+ # together.
+ externaldeps = self.distro._cache_volume(client, self.platform, "externaldeps")
+
+ ctr = (
+ ctr.with_directory(
+ self.repo_root.as_posix(), client.host().directory(host_repo_root)
+ )
+ .with_workdir(self.repo_root.as_posix())
+ .with_mounted_cache(
+ os.path.join(self.repo_root, "externaldeps"), externaldeps
+ )
+ )
+
+ return ctr
+
+ def install(self, client: dagger.Client, ctr: dagger.Container) -> dagger.Container:
+ args = ["--dont-wait", "--dont-start-it", "--disable-telemetry"]
+
+ if FeatureFlags.DBEngine not in self.features:
+ args.append("--disable-dbengine")
+
+ if FeatureFlags.GoPlugin not in self.features:
+ args.append("--disable-go")
+
+ if FeatureFlags.ExtendedBPF not in self.features:
+ args.append("--disable-ebpf")
+
+ if FeatureFlags.MachineLearning not in self.features:
+ args.append("--disable-ml")
+
+ if FeatureFlags.BundledProtobuf not in self.features:
+ args.append("--use-system-protobuf")
+
+ args.extend(["--install-prefix", self.prefix.parent.as_posix()])
+
+ ctr = self._mount_repo(client, ctr, self.repo_root.as_posix())
+
+ ctr = ctr.with_env_variable(
+ "NETDATA_CMAKE_OPTIONS", "-DCMAKE_BUILD_TYPE=Debug"
+ ).with_exec(["./netdata-installer.sh"] + args)
+
+ return ctr
+
+
+class Endpoint:
+ def __init__(self, hostname: str, port: int):
+ self.hostname = hostname
+ self.port = port
+
+ def __str__(self):
+ return ":".join([self.hostname, str(self.port)])
+
+
+class ChildStreamConf:
+ def __init__(
+ self,
+ installer: NetdataInstaller,
+ destinations: List[Endpoint],
+ api_key: uuid.UUID,
+ ):
+ self.installer = installer
+ self.substitutions = {
+ "enabled": "yes",
+ "destination": " ".join([str(dst) for dst in destinations]),
+ "api_key": api_key,
+ "timeout_seconds": 60,
+ "default_port": 19999,
+ "send_charts_matching": "*",
+ "buffer_size_bytes": 1024 * 1024,
+ "reconnect_delay_seconds": 5,
+ "initial_clock_resync_iterations": 60,
+ }
+
+ def render(self) -> str:
+ tmpl_path = pathlib.Path(__file__).parent / "files/child_stream.conf"
+ with open(tmpl_path) as fp:
+ tmpl = jinja2.Template(fp.read())
+
+ return tmpl.render(**self.substitutions)
+
+
+class ParentStreamConf:
+ def __init__(self, installer: NetdataInstaller, api_key: uuid.UUID):
+ self.installer = installer
+ self.substitutions = {
+ "api_key": str(api_key),
+ "enabled": "yes",
+ "allow_from": "*",
+ "default_history": 3600,
+ "health_enabled_by_default": "auto",
+ "default_postpone_alarms_on_connect_seconds": 60,
+ "multiple_connections": "allow",
+ }
+
+ def render(self) -> str:
+ tmpl_path = pathlib.Path(__file__).parent / "files/parent_stream.conf"
+ with open(tmpl_path) as fp:
+ tmpl = jinja2.Template(fp.read())
+
+ return tmpl.render(**self.substitutions)
+
+
+class StreamConf:
+ def __init__(self, child_conf: ChildStreamConf, parent_conf: ParentStreamConf):
+ self.child_conf = child_conf
+ self.parent_conf = parent_conf
+
+ def render(self) -> str:
+ child_section = self.child_conf.render() if self.child_conf else ""
+ parent_section = self.parent_conf.render() if self.parent_conf else ""
+ return "\n".join([child_section, parent_section])
+
+
+class AgentContext:
+ def __init__(
+ self,
+ client: dagger.Client,
+ platform: dagger.Platform,
+ distro: Distribution,
+ installer: NetdataInstaller,
+ endpoint: Endpoint,
+ api_key: uuid.UUID,
+ allow_children: bool,
+ ):
+ self.client = client
+ self.platform = platform
+ self.distro = distro
+ self.installer = installer
+ self.endpoint = endpoint
+ self.api_key = api_key
+ self.allow_children = allow_children
+
+ self.parent_contexts = []
+
+ self.built_distro = False
+ self.built_agent = False
+
+ def add_parent(self, parent_context: "AgentContext"):
+ self.parent_contexts.append(parent_context)
+
+ def build_container(self) -> dagger.Container:
+ ctr = self.distro.build(self.client, self.platform)
+ ctr = self.installer.install(self.client, ctr)
+
+ if len(self.parent_contexts) == 0 and not self.allow_children:
+ return ctr.with_exposed_port(self.endpoint.port)
+
+ destinations = [parent_ctx.endpoint for parent_ctx in self.parent_contexts]
+ child_stream_conf = ChildStreamConf(self.installer, destinations, self.api_key)
+
+ parent_stream_conf = None
+ if self.allow_children:
+ parent_stream_conf = ParentStreamConf(self.installer, self.api_key)
+
+ stream_conf = StreamConf(child_stream_conf, parent_stream_conf)
+
+        # write the stream conf on the host and copy it into the container
+ host_stream_conf_path = pathlib.Path(
+ f"/tmp/{self.endpoint.hostname}_stream.conf"
+ )
+ with open(host_stream_conf_path, "w") as fp:
+ fp.write(stream_conf.render())
+
+ ctr_stream_conf_path = self.installer.prefix / "etc/netdata/stream.conf"
+
+ ctr = ctr.with_file(
+ ctr_stream_conf_path.as_posix(),
+ self.client.host().file(host_stream_conf_path.as_posix()),
+ )
+
+ ctr = ctr.with_exposed_port(self.endpoint.port)
+
+ return ctr
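Note: `FeatureFlags` is an `enum.Flag`, so installer features combine with `|`, and `NetdataInstaller.install()` turns every absent flag into the matching `--disable-*` (or `--use-system-protobuf`) switch. A small sketch of how the flag checks behave, assuming it runs from `packaging/dag` so `nd` is importable:

```
from nd import FeatureFlags

features = FeatureFlags.DBEngine | FeatureFlags.MachineLearning

assert FeatureFlags.DBEngine in features              # dbengine stays enabled
assert FeatureFlags.GoPlugin not in features          # installer adds --disable-go
assert FeatureFlags.BundledProtobuf not in features   # adds --use-system-protobuf
```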
diff --git a/packaging/dag/requirements.txt b/packaging/dag/requirements.txt
new file mode 100644
index 000000000..66edabfea
--- /dev/null
+++ b/packaging/dag/requirements.txt
@@ -0,0 +1,3 @@
+click==8.1.7
+dagger-io==0.9.7
+Jinja2==3.1.4
diff --git a/packaging/dag/test_command.py b/packaging/dag/test_command.py
new file mode 100644
index 000000000..2418d142e
--- /dev/null
+++ b/packaging/dag/test_command.py
@@ -0,0 +1,128 @@
+import click
+import asyncio
+import sys
+import pathlib
+import dagger
+import uuid
+import httpx
+
+from nd import Distribution, NetdataInstaller, FeatureFlags, Endpoint, AgentContext
+
+
+def run_async(func):
+ def wrapper(*args, **kwargs):
+ return asyncio.run(func(*args, **kwargs))
+
+ return wrapper
+
+
+@run_async
+async def simple_test():
+ config = dagger.Config(log_output=sys.stdout)
+
+ async with dagger.Connection(config) as client:
+ platform = dagger.Platform("linux/x86_64")
+ distro = Distribution("debian10")
+
+ repo_root = pathlib.Path("/netdata")
+ prefix_path = pathlib.Path("/opt/netdata")
+ installer = NetdataInstaller(
+ platform, distro, repo_root, prefix_path, FeatureFlags.DBEngine
+ )
+
+ api_key = uuid.uuid4()
+
+ #
+ # parent
+ #
+ parent_endpoint = Endpoint("parent1", 22000)
+ parent_ctx = AgentContext(
+ client, platform, distro, installer, parent_endpoint, api_key, True
+ )
+ parent_cmd = installer.prefix / "usr/sbin/netdata"
+ parent_args = [
+ parent_cmd.as_posix(),
+ "-D",
+ "-i",
+ "0.0.0.0",
+ "-p",
+ str(parent_endpoint.port),
+ ]
+
+ parent_ctr = parent_ctx.build_container()
+ parent_ctr = parent_ctr.with_exec(parent_args)
+ parent_svc = parent_ctr.as_service()
+
+ #
+ # child
+ #
+ child_endpoint = Endpoint("child1", 21000)
+ child_ctx = AgentContext(
+ client, platform, distro, installer, child_endpoint, api_key, False
+ )
+ child_ctx.add_parent(parent_ctx)
+ child_cmd = installer.prefix / "usr/sbin/netdata"
+ child_args = [
+ child_cmd.as_posix(),
+ "-D",
+ "-i",
+ "0.0.0.0",
+ "-p",
+ str(child_endpoint.port),
+ ]
+
+ child_ctr = child_ctx.build_container()
+ child_ctr = child_ctr.with_service_binding(parent_endpoint.hostname, parent_svc)
+ child_ctr = child_ctr.with_exec(child_args)
+ child_svc = child_ctr.as_service()
+
+ #
+ # endpoints
+ #
+ parent_tunnel, child_tunnel = await asyncio.gather(
+ client.host().tunnel(parent_svc, native=True).start(),
+ client.host().tunnel(child_svc, native=True).start(),
+ )
+
+ parent_endpoint, child_endpoint = await asyncio.gather(
+ parent_tunnel.endpoint(),
+ child_tunnel.endpoint(),
+ )
+
+ await asyncio.sleep(10)
+
+ #
+ # run tests
+ #
+
+ async with httpx.AsyncClient() as http:
+ resp = await http.get(f"http://{parent_endpoint}/api/v1/info")
+
+ #
+ # Check that the child was connected
+ #
+ jd = resp.json()
+ assert (
+ "hosts-available" in jd
+        ), "Could not find 'hosts-available' key in api/v1/info"
+ assert jd["hosts-available"] == 2, "Child did not connect to parent"
+
+ #
+ # Check bearer protection
+ #
+ forbidden_urls = [
+ f"http://{parent_endpoint}/api/v2/bearer_protection",
+ f"http://{parent_endpoint}/api/v2/bearer_get_token",
+ ]
+
+ for url in forbidden_urls:
+ async with httpx.AsyncClient() as http:
+ resp = await http.get(url)
+ assert (
+ resp.status_code == httpx.codes.UNAVAILABLE_FOR_LEGAL_REASONS
+ ), "Bearer protection is broken"
+
+
+@click.command(help="Run a simple parent/child test")
+def test():
+ simple_test()
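Note: the bearer-protection assertions above expect HTTP 451, which is what `httpx.codes.UNAVAILABLE_FOR_LEGAL_REASONS` resolves to. A standalone sketch of the same check against an already-running agent (the endpoint value is a placeholder):

```
import asyncio

import httpx


async def bearer_protected(endpoint: str) -> bool:
    # The protected API endpoints must not be reachable without a bearer token.
    async with httpx.AsyncClient() as http:
        resp = await http.get(f"http://{endpoint}/api/v2/bearer_protection")
    return resp.status_code == httpx.codes.UNAVAILABLE_FOR_LEGAL_REASONS


if __name__ == "__main__":
    print(asyncio.run(bearer_protected("127.0.0.1:19999")))
```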
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index 8e7c9a7b1..b12af313d 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -29,7 +29,7 @@ RUN chmod +x netdata-installer.sh && \
cp -rp /deps/* /usr/local/ && \
/bin/echo -e "INSTALL_TYPE='oci'\nPREBUILT_ARCH='$(uname -m)'" > ./system/.install-type && \
CFLAGS="$(packaging/docker/gen-cflags.sh)" LDFLAGS="-Wl,--gc-sections" ./netdata-installer.sh --dont-wait --dont-start-it --use-system-protobuf \
- ${EXTRA_INSTALL_OPTS} --disable-ebpf --one-time-build --enable-lto "$([ "$RELEASE_CHANNEL" = stable ] && echo --stable-channel)"
+ ${EXTRA_INSTALL_OPTS} --disable-ebpf --install-no-prefix / "$([ "$RELEASE_CHANNEL" = stable ] && echo --stable-channel)"
# files to one directory
RUN mkdir -p /app/usr/sbin/ \
@@ -122,7 +122,9 @@ RUN addgroup --gid ${NETDATA_GID} --system "${DOCKER_GRP}" && \
freeipmi.plugin \
go.d.plugin \
perf.plugin \
+ ndsudo \
slabinfo.plugin \
+ network-viewer.plugin \
systemd-journal.plugin; do \
[ -f "/usr/libexec/netdata/plugins.d/$name" ] && chmod 4755 "/usr/libexec/netdata/plugins.d/$name"; \
done && \
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index 528ef5926..6deb0cfa9 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -12,6 +12,16 @@ import TabItem from '@theme/TabItem';
# Install Netdata with Docker
+## Limitations running the Agent in Docker
+
+We do not officially support running our Docker images with the Docker CLI `--user` option or the Docker Compose
+`user:` parameter. Such usage will usually still work, but some features will not be available when run this way.
+Note that the Agent drops privileges inside the container during startup, so even without these options almost
+nothing in the container actually runs with an effective UID of 0.
+
+Our POWER8+ Docker images do not support our FreeIPMI collector. This is a technical limitation in FreeIPMI itself,
+and unfortunately not something we can realistically work around.
+
## Create a new Netdata Agent container
You can create a new Agent container using either `docker run` or `docker-compose`. After using any method, you can
@@ -24,12 +34,13 @@ along with their descriptions.
<details open>
<summary>Privileges</summary>
-| Component | Privileges | Description |
-|:---------------:|:-----------------------------:|--------------------------------------------------------------------------------------------------------------------------|
-| cgroups.plugin | host PID mode, SYS_ADMIN | Container network interfaces monitoring. Map virtual interfaces in the system namespace to interfaces inside containers. |
-| proc.plugin | host network mode | Host system networking stack monitoring. |
-| go.d.plugin | host network mode | Monitoring applications running on the host and inside containers. |
-| local-listeners | host network mode, SYS_PTRACE | Discovering local services/applications. Map open (listening) ports to running services/applications. |
+| Component | Privileges | Description |
+|:---------------------:|:-----------------------------:|--------------------------------------------------------------------------------------------------------------------------|
+| cgroups.plugin | host PID mode, SYS_ADMIN | Container network interfaces monitoring. Map virtual interfaces in the system namespace to interfaces inside containers. |
+| proc.plugin | host network mode | Host system networking stack monitoring. |
+| go.d.plugin | host network mode | Monitoring applications running on the host and inside containers. |
+| local-listeners | host network mode, SYS_PTRACE | Discovering local services/applications. Map open (listening) ports to running services/applications. |
+| network-viewer.plugin | host network mode, SYS_ADMIN | Discovering all current network sockets and building a network-map. |
</details>
@@ -39,6 +50,7 @@ along with their descriptions.
| Component | Mounts | Description |
|:----------------------:|:--------------------------:|--------------------------------------------------------------------------------------------------------------------------------------------|
| netdata | /etc/os-release | Host info detection. |
+| diskspace.plugin | / | Host mount points monitoring. |
| cgroups.plugin | /sys, /var/run/docker.sock | Docker containers monitoring and name resolution. |
| go.d.plugin | /var/run/docker.sock | Docker Engine and containers monitoring. See [docker](https://github.com/netdata/go.d.plugin/tree/master/modules/docker#readme) collector. |
| go.d.plugin | /var/log | Web servers logs tailing. See [weblog](https://github.com/netdata/go.d.plugin/tree/master/modules/weblog#readme) collector. |
@@ -69,6 +81,7 @@ docker run -d --name=netdata \
-v netdataconfig:/etc/netdata \
-v netdatalib:/var/lib/netdata \
-v netdatacache:/var/cache/netdata \
+ -v /:/host/root:ro,rslave \
-v /etc/passwd:/host/etc/passwd:ro \
-v /etc/group:/host/etc/group:ro \
-v /etc/localtime:/etc/localtime:ro \
@@ -110,6 +123,7 @@ services:
- netdataconfig:/etc/netdata
- netdatalib:/var/lib/netdata
- netdatacache:/var/cache/netdata
+ - /:/host/root:ro,rslave
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /etc/localtime:/etc/localtime:ro
@@ -161,6 +175,43 @@ Add `- /run/dbus:/run/dbus:ro` to the netdata service `volumes`.
</TabItem>
</Tabs>
+### With NVIDIA GPUs monitoring
+
+
+Monitoring NVIDIA GPUs requires:
+
+- Using the official [NVIDIA driver](https://www.nvidia.com/Download/index.aspx).
+- Installing [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
+- Allowing the Netdata container to access GPU resources.
+
+
+<Tabs>
+<TabItem value="docker_run" label="docker run">
+
+<h3> Using the <code>docker run</code> command </h3>
+
+Add `--gpus 'all,capabilities=utility'` to your `docker run`.
+
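+For example, an abbreviated `docker run` sketch (keep the rest of your usual options and mounts from the
+[container creation section](#create-a-new-netdata-agent-container)):
+
+```bash
+docker run -d --name=netdata \
+  --gpus 'all,capabilities=utility' \
+  -p 19999:19999 \
+  netdata/netdata
+```
+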
+</TabItem>
+<TabItem value="docker compose" label="docker-compose">
+
+<h3> Using the <code>docker-compose</code> command</h3>
+
+Add the following to the netdata service.
+
+```yaml
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ count: all
+ capabilities: [gpu]
+```
+
+</TabItem>
+</Tabs>
+
### With host-editable configuration
Use a [bind mount](https://docs.docker.com/storage/bind-mounts/) for `/etc/netdata` rather than a volume.
@@ -185,6 +236,7 @@ docker run -d --name=netdata \
-v $(pwd)/netdataconfig/netdata:/etc/netdata \
-v netdatalib:/var/lib/netdata \
-v netdatacache:/var/cache/netdata \
+ -v /:/host/root:ro,rslave \
-v /etc/passwd:/host/etc/passwd:ro \
-v /etc/group:/host/etc/group:ro \
-v /etc/localtime:/etc/localtime:ro \
@@ -226,6 +278,7 @@ services:
- ./netdataconfig/netdata:/etc/netdata
- netdatalib:/var/lib/netdata
- netdatacache:/var/cache/netdata
+ - /:/host/root:ro,rslave
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /etc/localtime:/etc/localtime:ro
@@ -246,7 +299,7 @@ volumes:
### With SSL/TLS enabled HTTP Proxy
For a permanent installation on a public server, you
-should [secure the Netdata instance](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md). This
+should [secure the Netdata instance](/docs/netdata-agent/securing-netdata-agents.md). This
section contains an example of how to install Netdata with an SSL reverse proxy and basic authentication.
You can use the following `docker-compose.yml` and Caddyfile files to run Netdata with Docker. Replace the domains and
@@ -260,7 +313,7 @@ executed internally by the caddy server.
```caddyfile
netdata.example.org {
- reverse_proxy netdata:19999
+ reverse_proxy host.docker.internal:19999
tls admin@example.org
}
```
@@ -270,11 +323,15 @@ netdata.example.org {
After setting Caddyfile run this with `docker-compose up -d` to have a fully functioning Netdata setup behind an HTTP reverse
proxy.
+Make sure Netdata binds to the docker0 interface if you have a custom `web.bind to` setting in `netdata.conf`.
+
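+For example, a minimal `netdata.conf` sketch (assuming the default `docker0` gateway address `172.17.0.1`):
+
+```text
+[web]
+    bind to = 127.0.0.1 172.17.0.1
+```
+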
```yaml
version: '3'
services:
caddy:
image: caddy:2
+ extra_hosts:
+ - "host.docker.internal:host-gateway" # To access netdata running with "network_mode: host".
ports:
- "80:80"
- "443:443"
@@ -285,9 +342,9 @@ services:
netdata:
image: netdata/netdata
container_name: netdata
- hostname: example.com # set to fqdn of host
- restart: always
pid: host
+ network_mode: host
+ restart: unless-stopped
cap_add:
- SYS_PTRACE
- SYS_ADMIN
@@ -297,6 +354,7 @@ services:
- netdataconfig:/etc/netdata
- netdatalib:/var/lib/netdata
- netdatacache:/var/cache/netdata
+ - /:/host/root:ro,rslave
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /etc/localtime:/etc/localtime:ro
@@ -320,8 +378,10 @@ to Caddyfile.
### With Docker socket proxy
+> **Note**: Some features may not work as expected when Netdata is used with a Docker socket proxy. This setup has not been fully tested by the Netdata team.
+
Deploy a Docker socket proxy that accepts and filters out requests using something like
-[HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md) or
+[HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) or
[CetusGuard](https://github.com/hectorm/cetusguard) so that it restricts connections to read-only access to
the `/containers` endpoint.
@@ -348,6 +408,7 @@ services:
- netdataconfig:/etc/netdata
- netdatalib:/var/lib/netdata
- netdatacache:/var/cache/netdata
+ - /:/host/root:ro,rslave
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /etc/localtime:/etc/localtime:ro
@@ -395,6 +456,7 @@ services:
- netdataconfig:/etc/netdata
- netdatalib:/var/lib/netdata
- netdatacache:/var/cache/netdata
+ - /:/host/root:ro,rslave
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /etc/localtime:/etc/localtime:ro
@@ -426,6 +488,62 @@ volumes:
You can run the socket proxy in its own Docker Compose file and leave it on a private network that you can add to
other services that require access.
+### Rootless mode
+
+Netdata can be run successfully in a non-root environment, such as [rootless Docker](https://docs.docker.com/engine/security/rootless/).
+
+However, Netdata's data collection capabilities are considerably restricted in rootless Docker due to its inherent
+limitations: while Netdata can function in a rootless environment, it cannot access resources that require elevated
+privileges. The following components do not work:
+
+- container network interfaces monitoring (cgroup-network helper)
+- disk I/O and file descriptors of applications and processes (apps.plugin)
+- debugfs.plugin
+- freeipmi.plugin
+- perf.plugin
+- slabinfo.plugin
+- systemd-journal.plugin
+
+This method creates a [volume](https://docs.docker.com/storage/volumes/) for Netdata's configuration files
+_within the container_ at `/etc/netdata`.
+See the [configure section](#configure-agent-containers) for details. If you want to access the configuration files from
+your _host_ machine, see [host-editable configuration](#with-host-editable-configuration).
+
+<Tabs>
+<TabItem value="docker_run" label="docker run">
+
+<h3> Using the <code>docker run</code> command </h3>
+
+Run the following command in your terminal to start a new container.
+
+```bash
+docker run -d --name=netdata \
+ --hostname=$(hostname) \
+ -p 19999:19999 \
+ -v netdataconfig:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ -v /run/user/$UID/docker.sock:/var/run/docker.sock:ro \
+ --restart unless-stopped \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+</TabItem>
+
+</Tabs>
+
+> :bookmark_tabs: Note
+>
+> If you plan to claim the node to Netdata Cloud, you can find the command with the right parameters by clicking the
+> "Add Nodes" button in your Space's "Nodes" view.
+
## Docker tags
See our full list of Docker images at [Docker Hub](https://hub.docker.com/r/netdata/netdata).
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
index 415573320..6ba16d1ce 100755
--- a/packaging/docker/run.sh
+++ b/packaging/docker/run.sh
@@ -14,38 +14,6 @@ if [ ! -w / ] && [ "${EUID}" -eq 0 ]; then
echo >&2 "WARNING: For more information, see https://learn.netdata.cloud/docs/agent/claim#known-issues-on-older-hosts-with-seccomp-enabled"
fi
-if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
- [ -n "$DISABLE_TELEMETRY" ] ||
- [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] ||
- [ -n "$DO_NOT_TRACK" ]; then
- touch /etc/netdata/.opt-out-from-anonymous-statistics
-fi
-
-chmod o+rX / 2>/dev/null || echo "Unable to change permissions without errors."
-
-BALENA_PGID=$(stat -c %g /var/run/balena.sock 2>/dev/null || true)
-DOCKER_PGID=$(stat -c %g /var/run/docker.sock 2>/dev/null || true)
-
-re='^[0-9]+$'
-if [[ $BALENA_PGID =~ $re ]]; then
- echo "Netdata detected balena-engine.sock"
- DOCKER_HOST='/var/run/balena-engine.sock'
- PGID="$BALENA_PGID"
-elif [[ $DOCKER_PGID =~ $re ]]; then
- echo "Netdata detected docker.sock"
- DOCKER_HOST="/var/run/docker.sock"
- PGID="$DOCKER_PGID"
-fi
-export PGID
-export DOCKER_HOST
-
-if [ -n "${PGID}" ]; then
- echo "Creating docker group ${PGID}"
- addgroup --gid "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, its already there probably"
- echo "Assign netdata user to docker group ${PGID}"
- usermod --append --groups "docker" "${DOCKER_USR}" || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
-fi
-
# Needed to read Proxmox VMs and (LXC) containers configuration files (name resolution + CPU and memory limits)
function add_netdata_to_proxmox_conf_files_group() {
group_guid="$(stat -c %g /host/etc/pve 2>/dev/null || true)"
@@ -68,10 +36,65 @@ function add_netdata_to_proxmox_conf_files_group() {
fi
}
-if [ -d "/host/etc/pve" ]; then
- add_netdata_to_proxmox_conf_files_group || true
+if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
+ [ -n "$DISABLE_TELEMETRY" ] ||
+ [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] ||
+ [ -n "$DO_NOT_TRACK" ]; then
+ touch /etc/netdata/.opt-out-from-anonymous-statistics
fi
+chmod o+rX / 2>/dev/null || echo "Unable to change permissions without errors."
+
+if [ "${EUID}" -eq 0 ]; then
+ if [ -n "${NETDATA_EXTRA_APK_PACKAGES}" ]; then
+ echo >&2 "WARNING: Netdata’s Docker images have switched from Alpine to Debian as a base platform. Supplementary package support is now handled through the NETDATA_EXTRA_DEB_PACKAGES variable instead of NETDATA_EXTRA_APK_PACKAGES."
+ echo >&2 "WARNING: The container will still run, but supplementary packages listed in NETDATA_EXTRA_APK_PACKAGES will not be installed."
+ echo >&2 "WARNING: To remove these messages, either undefine NETDATA_EXTRA_APK_PACKAGES, or define it to an empty string."
+ fi
+
+ if [ -n "${NETDATA_EXTRA_DEB_PACKAGES}" ]; then
+ echo "Fetching APT repository metadata."
+ if ! apt-get update; then
+ echo "Failed to fetch APT repository metadata."
+ else
+ echo "Installing supplementary packages."
+ export DEBIAN_FRONTEND="noninteractive"
+ # shellcheck disable=SC2086
+ if ! apt-get install -y --no-install-recommends ${NETDATA_EXTRA_DEB_PACKAGES}; then
+ echo "Failed to install supplementary packages."
+ fi
+ fi
+ fi
+
+ BALENA_PGID=$(stat -c %g /var/run/balena.sock 2>/dev/null || true)
+ DOCKER_PGID=$(stat -c %g /var/run/docker.sock 2>/dev/null || true)
+
+ re='^[0-9]+$'
+ if [[ $BALENA_PGID =~ $re ]]; then
+ echo "Netdata detected balena-engine.sock"
+ DOCKER_HOST='/var/run/balena-engine.sock'
+ PGID="$BALENA_PGID"
+ elif [[ $DOCKER_PGID =~ $re ]]; then
+ echo "Netdata detected docker.sock"
+ DOCKER_HOST="/var/run/docker.sock"
+ PGID="$DOCKER_PGID"
+ fi
+ export PGID
+ export DOCKER_HOST
+
+ if [ -n "${PGID}" ]; then
+ echo "Creating docker group ${PGID}"
+ addgroup --gid "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, it probably already exists"
+ echo "Assign netdata user to docker group ${PGID}"
+ usermod --append --groups "docker" "${DOCKER_USR}" || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
+ fi
+
+ if [ -d "/host/etc/pve" ]; then
+ add_netdata_to_proxmox_conf_files_group || true
+ fi
+else
+ echo >&2 "WARNING: Entrypoint started as non-root user. This is not officially supported and some features may not be available."
+fi
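+
+# Illustrative usage only (hypothetical package list), showing how the block above is driven
+# from the container environment:
+#   docker run -d --name=netdata -e NETDATA_EXTRA_DEB_PACKAGES="lm-sensors" netdata/netdata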
if mountpoint -q /etc/netdata; then
echo "Copying stock configuration to /etc/netdata"
@@ -97,24 +120,4 @@ if [ -n "${NETDATA_CLAIM_URL}" ] && [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ ! -f /v
-daemon-not-running
fi
-if [ -n "${NETDATA_EXTRA_APK_PACKAGES}" ]; then
- echo >&2 "WARNING: Netdata’s Docker images have switched from Alpine to Debian as a base platform. Supplementary package support is now handled through the NETDATA_EXTRA_DEB_PACKAGES variable instead of NETDATA_EXTRA_APK_PACKAGES."
- echo >&2 "WARNING: The container will still run, but supplementary packages listed in NETDATA_EXTRA_APK_PACKAGES will not be installed."
- echo >&2 "WARNING: To remove these messages, either undefine NETDATA_EXTRA_APK_PACKAGES, or define it to an empty string."
-fi
-
-if [ -n "${NETDATA_EXTRA_DEB_PACKAGES}" ]; then
- echo "Fetching APT repository metadata."
- if ! apt-get update; then
- echo "Failed to fetch APT repository metadata."
- else
- echo "Installing supplementary packages."
- export DEBIAN_FRONTEND="noninteractive"
- # shellcheck disable=SC2086
- if ! apt-get install -y --no-install-recommends ${NETDATA_EXTRA_DEB_PACKAGES}; then
- echo "Failed to install supplementary packages."
- fi
- fi
-fi
-
exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_LISTENER_PORT}" "$@"
diff --git a/packaging/ebpf-co-re.checksums b/packaging/ebpf-co-re.checksums
deleted file mode 100644
index c51f3ef5f..000000000
--- a/packaging/ebpf-co-re.checksums
+++ /dev/null
@@ -1 +0,0 @@
-7ef8d2a0f485b4c81942f66c50e1aedcd568b7997a933c50c0ebbd8353543c08 netdata-ebpf-co-re-glibc-v1.2.8.tar.xz
diff --git a/packaging/ebpf-co-re.version b/packaging/ebpf-co-re.version
deleted file mode 100644
index d1f79a941..000000000
--- a/packaging/ebpf-co-re.version
+++ /dev/null
@@ -1 +0,0 @@
-v1.2.8
diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums
deleted file mode 100644
index 28f023d52..000000000
--- a/packaging/ebpf.checksums
+++ /dev/null
@@ -1,3 +0,0 @@
-9035b6b8dda5230c1ddc44991518a3ee069bd497ad5a8e5448b79dc4b8c51c43 ./netdata-kernel-collector-glibc-v1.2.8.tar.xz
-e5b1a141475f75c60c282a2e3ce8e3914893e75d474c976bad95f66d4c9846c5 ./netdata-kernel-collector-musl-v1.2.8.tar.xz
-d6081a2fedc9435d1ab430697cb101123cebaac07b62fb91d790ca526923f4e3 ./netdata-kernel-collector-static-v1.2.8.tar.xz
diff --git a/packaging/ebpf.version b/packaging/ebpf.version
deleted file mode 100644
index d1f79a941..000000000
--- a/packaging/ebpf.version
+++ /dev/null
@@ -1 +0,0 @@
-v1.2.8
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
deleted file mode 100644
index addfc0a50..000000000
--- a/packaging/go.d.checksums
+++ /dev/null
@@ -1,20 +0,0 @@
-9eeb1a06764fcc7f75e25d56916d1566d4a7206de778856165f407409f091470 *config.tar.gz
-9eeb1a06764fcc7f75e25d56916d1566d4a7206de778856165f407409f091470 *go.d.plugin-config-v0.58.0.tar.gz
-cb9bbbc164e16fdef46ddb3a9aafa354fe83a2765a5aa25a7ceeaa4c60d90eb7 *go.d.plugin-v0.58.0.darwin-amd64.tar.gz
-6d5123955f87ebf30e5faf17c8502616ca84f156ae5e6108cb4a83b79dd0fa6b *go.d.plugin-v0.58.0.darwin-arm64.tar.gz
-04a3ceebb345556cfc3f5dd5230c31d06cf59f8f6a6d85c4e8cfb1497ac2c793 *go.d.plugin-v0.58.0.freebsd-386.tar.gz
-9b530d2d7d387c81d0551888b0aa6b55290910f75c5f01a0d399ca29fa83757f *go.d.plugin-v0.58.0.freebsd-amd64.tar.gz
-45f4d0884b3993d3758f63a453ace96207ebdb9f2d97f89d5e42795ca743c6b6 *go.d.plugin-v0.58.0.freebsd-arm.tar.gz
-d4222a6812255946f5d367cd59e8d6284c36b44baaba2925f7268bc42368a41a *go.d.plugin-v0.58.0.freebsd-arm64.tar.gz
-4d71efc97a8f32db36f1d3f925e97531f846d9c39d66fbe63f00097f9a6cd425 *go.d.plugin-v0.58.0.linux-386.tar.gz
-287db876af5a5b093ee91ef937f4ee59ebc5fdf79e403a48042b9f3cf58c716f *go.d.plugin-v0.58.0.linux-amd64.tar.gz
-c0a4f1a20e2d93e1df7adab651b9feb7ca481b0b04e4e12323cad7b8f39e8590 *go.d.plugin-v0.58.0.linux-arm.tar.gz
-b94adb6df7fc3a04cda1078e82c2d97a514c12dcc12f5dba7cec2259a34c89bb *go.d.plugin-v0.58.0.linux-arm64.tar.gz
-5570b0ebc7c1a45c00301b0212531ee178cc06cb47912330ebc3d3d20bed6b13 *go.d.plugin-v0.58.0.linux-mips.tar.gz
-6a850631e1978fdb6ff27923c3779f85e985dd0adb3cfb3767a482777e1802c8 *go.d.plugin-v0.58.0.linux-mips64.tar.gz
-1ac22842fa52b97efac45f39f36e9fe69bd9a47497d91653563e02c2855ea5ff *go.d.plugin-v0.58.0.linux-mips64le.tar.gz
-2487214cf11430e4152fbccf17205764d91e731aa236b2edb994d8242d33db26 *go.d.plugin-v0.58.0.linux-mipsle.tar.gz
-547e4196cd1ebe07054de74f64bcea5ff704376138a495d6b66a6d3f46b22c5f *go.d.plugin-v0.58.0.linux-ppc64.tar.gz
-3917e4c798cca7d5f944eb983f8facc2227abff88fc12398a277ee38010540cd *go.d.plugin-v0.58.0.linux-ppc64le.tar.gz
-089bff22c63c1b79a0081e3c52e26eacafbea3698f967e5d18cee0c7dd0f88f9 *go.d.plugin-vendor-v0.58.0.tar.gz
-089bff22c63c1b79a0081e3c52e26eacafbea3698f967e5d18cee0c7dd0f88f9 *vendor.tar.gz
diff --git a/packaging/go.d.version b/packaging/go.d.version
deleted file mode 100644
index 0bf661714..000000000
--- a/packaging/go.d.version
+++ /dev/null
@@ -1 +0,0 @@
-v0.58.0
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
index a99e869b8..d15925dca 100644
--- a/packaging/installer/README.md
+++ b/packaging/installer/README.md
@@ -1,139 +1,36 @@
-import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
-import { InstallRegexLink, InstallBoxRegexLink } from '@site/src/components/InstallRegexLink/'
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+# Netdata Agent Installation
-# Install Netdata
+Netdata is very flexible and can be used to monitor all kinds of infrastructure. Read about the possible [Deployment guides](/docs/deployment-guides/README.md) to understand what best suits your needs.
-This document will guide you through installing the open-source Netdata monitoring Agent on Linux, Docker, Kubernetes, and many others, often with one command.
+## Install through Netdata Cloud
-Netdata is very flexible and can be used to monitor all kinds of infrastructure. Read more about possible [Deployment strategies](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/deployment-strategies.md) to understand what better suites your needs.
+The easiest way to install Netdata on your system is via Netdata Cloud. To do so:
-## Get started
+1. Sign up to <https://app.netdata.cloud/>.
+2. You will be presented with an empty space, and a prompt to "Connect Nodes" with the install command for each platform.
+3. Select the platform you want to install Netdata to, copy and paste the script into your node's terminal, and run it.
-Netdata is a free and open-source (FOSS) monitoring agent that collects thousands of hardware and software metrics from
-any physical or virtual system (we call them _nodes_). These metrics are organized in an easy-to-use and -navigate interface.
+Once Netdata is installed, you can see the node live in your Netdata Space and charts in the [Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md).
-Together with [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you can monitor your entire infrastructure in
-real time and troubleshoot problems that threaten the health of your nodes.
+Take a look at our [Dashboards and Charts](/docs/dashboards-and-charts/README.md) section to read more about Netdata's features.
-Netdata runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT devices. It
-runs on Linux distributions (Ubuntu, Debian, CentOS, and more), container/microservice platforms (Kubernetes clusters,
-Docker), and many other operating systems (FreeBSD, macOS), with no `sudo` required.
+## Post-install
-To install Netdata in minutes on your platform:
+### Configuration
-1. Sign up to <https://app.netdata.cloud/>
-2. You will be presented with an empty space, and a prompt to "Connect Nodes" with the install command for each platform
-3. Select the platform you want to install Netdata to, copy and paste the script into your node's terminal, and run it
+If you are looking to configure your Netdata Agent installation, refer to the [respective section in our Documentation](/docs/netdata-agent/configuration/README.md).
-Upon installation completing successfully, you should be able to see the node live in your Netdata Space and live charts
-in the Overview tab. [Read more about the cloud features](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md).
+### Data collection
-Where you go from here is based on your use case, immediate needs, and experience with monitoring and troubleshooting,
-but we have some hints on what you might want to do next.
+If Netdata didn't autodetect all the hardware, containers, services, or applications running on your node, you should learn more about [how data collectors work](/src/collectors/README.md). If there's a [supported integration](/src/collectors/COLLECTORS.md) for the metrics you need, refer to its respective page and read about its requirements, so you can configure your endpoint to publish metrics in the correct format.
-### What's next?
+### Alerts & notifications
-Explore our [general advanced installation options and troubleshooting](#advanced-installation-options-and-troubleshooting), specific options
-for the [single line installer](#install-on-linux-with-one-line-installer), or [other installation methods](#other-installation-methods).
+Netdata comes with hundreds of pre-configured alerts, designed by our monitoring gurus in parallel with our open-source community, but you may want to [edit alerts](/src/health/REFERENCE.md) or [enable notifications](/docs/alerts-and-notifications/notifications/README.md) to customize your Netdata experience.
-#### Configuration
+### Make your deployment production ready
-Discover the recommended way to [configure Netdata's settings or behavior](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) using our built-in
-`edit-config` script, then apply that knowledge to mission-critical tweaks, such as [changing how long Netdata stores
-metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
-
-#### Data collection
-
-If Netdata didn't autodetect all the hardware, containers, services, or applications running on your node, you should
-learn more about [how data collectors work](https://github.com/netdata/netdata/blob/master/collectors/README.md). If there's a [supported
-collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) for metrics you need, [configure the collector](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md)
-or read about its requirements to configure your endpoint to publish metrics in the correct format and endpoint.
-
-#### Alerts & notifications
-
-Netdata comes with hundreds of preconfigured alerts, designed by our monitoring gurus in parallel with our open-source
-community, but you may want to [edit alerts](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md) or
-[enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to customize your Netdata experience.
-
-#### Make your deployment production ready
-
-Go through our [deployment strategies](https://github.com/netdata/netdata/edit/master/docs/category-overview-pages/deployment-strategies.md),
-for suggested configuration changes for production deployments.
-
-## Install on Linux with one-line installer
-
-The **recommended** way to install Netdata on a Linux node (physical, virtual, container, IoT) is our one-line
-[kickstart script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
-This script automatically installs dependencies and builds Netdata from its source code.
-
-To install, copy the script, paste it into your node's terminal, and hit `Enter` to begin the installation process.
-
- <Tabs>
- <TabItem value="wget" label=<code>wget</code>>
-
- <OneLineInstallWget/>
-
- </TabItem>
- <TabItem value="curl" label=<code>curl</code>>
-
- <OneLineInstallCurl/>
-
- </TabItem>
-</Tabs>
-
-> ### Note
->
-> If you plan to also claim the node to Netdata Cloud, make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space, and `YOUR_ROOM_ID` with the ID of the room you are claiming to.
-> You can leave the room id blank to have your node claimed to the default "All nodes" room.
-
-Jump up to [what's next](#whats-next) to learn how to view your new dashboard and take your next steps in monitoring and
-troubleshooting with Netdata.
-
-## Other installation methods
-
-<InstallRegexLink>
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)"
- os="Run with Docker"
- svg="docker" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md)"
- os="Deploy on Kubernetes"
- svg="kubernetes" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md)"
- os="Install on macOS"
- svg="macos" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)"
- os="Native DEB/RPM packages"
- svg="linux" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)"
- os="Linux from Git"
- svg="linux" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md)"
- os="Linux from source"
- svg="linux" />
- <InstallBoxRegexLink
- to="[](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md)"
- os="Linux for offline nodes"
- svg="linux" />
-</InstallRegexLink>
-
-- [Native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)
-- [Run with Docker](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)
-- [Deploy on Kubernetes](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md)
-- [Install on macOS](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md)
-- [Linux from Git](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)
-- [Linux from source](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md)
-- [Linux for offline nodes](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md)
-
-The full list of all installation methods for various systems is available in [Netdata Learn](https://learn.netdata.cloud),
-under [Installation](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/installation-overview.md).
+Go through our [deployment guides](/docs/deployment-guides/README.md) for suggested configuration changes for production deployments.
## Advanced installation options and troubleshooting
@@ -141,32 +38,16 @@ under [Installation](https://github.com/netdata/netdata/blob/master/docs/categor
By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels.
-If you would prefer to update your Netdata agent manually, you can disable automatic updates by using the `--no-updates`
-option when you install or update Netdata using the [automatic one-line installation
-script](#automatic-one-line-installation-script).
+If you prefer to update your Netdata Agent manually, you can disable automatic updates by using the `--no-updates`
+option when you install or update Netdata using the [automatic one-line installation script](/packaging/installer/methods/kickstart.md).
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --no-updates
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --no-updates
```
-With automatic updates disabled, you can choose exactly when and how you [update
-Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md).
-
-#### Network usage of Netdata’s automatic updater
-
-The auto-update functionality set up by the installation scripts requires working internet access to function
-correctly. In particular, it currently requires access to GitHub (to check if a newer version of the updater script
-is available or not, as well as potentially fetching build-time dependencies that are bundled as part of the install),
-and Google Cloud Storage (to check for newer versions of Netdata and download the sources if there is a newer version).
-
-Note that the auto-update functionality will check for updates to itself independently of updates to Netdata,
-and will try to use the latest version of the updater script whenever possible. This is intended to reduce the
-amount of effort required by users to get updates working again in the event of a bug in the updater code.
-
-### Nightly vs. stable releases
+With automatic updates disabled, you can choose exactly when and how you [update Netdata](/packaging/installer/UPDATE.md).
-The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's
-installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration.
+### Nightly vs. Stable Releases
**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws,
or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when
@@ -187,13 +68,12 @@ the community helps fix any bugs that might have been introduced in previous rel
**Pros of using stable releases:**
-- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata
- installation
+- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation
- Retain more control over the Netdata version you use
### Anonymous statistics
-Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. Read about the information collected, and learn how to-opt, on our [anonymous statistics](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) page.
+Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. Read about the information collected, and learn how to opt out, on our [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) page.
The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for
_actively_ contributing to Netdata's future.
@@ -215,12 +95,12 @@ There are three potential workarounds for this:
affect many projects other than just Netdata, and there are unfortunately a number of other services out there
that do not provide IPv6 connectivity, so taking this route is likely to save you time in the future as well.
2. If you are using a system that we publish native packages for (see our [platform support
- policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md) for more details),
+ policy](/docs/netdata-agent/versions-and-platforms.md) for more details),
you can manually set up our native package repositories as outlined in our [native package install
- documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md). Our official
+ documentation](/packaging/installer/methods/packages.md). Our official
package repositories do provide service over IPv6, so they work without issue on hosts without IPv4 connectivity.
3. If neither of the above options work for you, you can still install using our [offline installation
- instructions](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md), though
+ instructions](/packaging/installer/methods/offline.md), though
do note that the offline install source must be prepared from a system with IPv4 connectivity.
#### Older distributions (Ubuntu 14.04, Debian 8, CentOS 6) and OpenSSL
@@ -237,8 +117,8 @@ man-in-the-middle attacks.
#### CentOS 6 and CentOS 8
To install the Agent on certain CentOS and RHEL systems, you must enable non-default repositories, such as EPEL or
-PowerTools, to gather hard dependencies. See the [CentOS 6](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#centos--rhel-6x) and
-[CentOS 8](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#centos--rhel-8x) sections for more information.
+PowerTools, to gather hard dependencies. See the [CentOS 6](/packaging/installer/methods/manual.md#centos--rhel-6x) and
+[CentOS 8](/packaging/installer/methods/manual.md#centos--rhel-8x) sections for more information.
#### Access to file is not permitted
@@ -272,4 +152,4 @@ both.
Our current build process has some issues when using certain configurations of the `clang` C compiler on Linux. See [the
section on `nonrepresentable section on output`
-errors](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#nonrepresentable-section-on-output-errors) for a workaround.
+errors](/packaging/installer/methods/manual.md#nonrepresentable-section-on-output-errors) for a workaround.
diff --git a/packaging/installer/REINSTALL.md b/packaging/installer/REINSTALL.md
index 82cea498a..eeb0e2313 100644
--- a/packaging/installer/REINSTALL.md
+++ b/packaging/installer/REINSTALL.md
@@ -8,17 +8,17 @@ Netdata Agent on your node.
### Reinstalling with the same install type
Run the one-line installer script with the `--reinstall` parameter to reinstall the Netdata Agent. This will preserve
-any [user configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) in `netdata.conf` or other files, and will keep the same install
+any [user configuration](/docs/netdata-agent/configuration/README.md) in `netdata.conf` or other files, and will keep the same install
type that was used for the original install.
If you used any [optional
-parameters](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
+parameters](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you
used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of
optional parameters.
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall
```
### Performing a clean reinstall
@@ -29,13 +29,13 @@ getting a badly broken installation working again. Unlike the regular `--reinsta
different install type than the original install used.
If you used any [optional
-parameters](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
+parameters](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you
used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of
optional parameters.
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall-clean
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall-clean
```
### Changing the install type of an existing installation
@@ -59,8 +59,7 @@ When copying these directories back after the reinstall, you may need to update
## Troubleshooting
If you still experience problems with your Netdata Agent installation after following one of these processes, the next
-best route is to [uninstall](https://github.com/netdata/netdata/blob/master/packaging/installer/UNINSTALL.md) and then try a fresh installation using the [one-line
-installer](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+best route is to [uninstall](/packaging/installer/UNINSTALL.md) and then try a fresh installation using the [one-line
+installer](/packaging/installer/methods/kickstart.md).
-You can also post to our [community forums](https://community.netdata.cloud/c/support/13) or create a new [bug
-report](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml).
+You can also post to our [community forums](https://community.netdata.cloud) or create a new [bug report](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml).
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
index a66bd7a28..c7de90d95 100644
--- a/packaging/installer/UNINSTALL.md
+++ b/packaging/installer/UNINSTALL.md
@@ -3,18 +3,18 @@
> ### Note
>
> If you're having trouble updating Netdata, moving from one installation method to another, or generally having
-> issues with your Netdata Agent installation, consider our [reinstalling Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md) instead of removing the Netdata Agent entirely.
+> issues with your Netdata Agent installation, consider our [reinstalling Netdata](/packaging/installer/REINSTALL.md) instead of removing the Netdata Agent entirely.
The recommended method to uninstall Netdata on a system is to use our kickstart installer script with the `--uninstall` option like so:
```sh
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
```
Or (if you have curl but not wget):
```sh
-curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
+curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall
```
This will work in most cases without you needing to do anything more other than accepting removal of configuration
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md
index 7275ee524..94faa881b 100644
--- a/packaging/installer/UPDATE.md
+++ b/packaging/installer/UPDATE.md
@@ -5,7 +5,7 @@ you installed. If you opted out of automatic updates, you need to update your Ne
or stable version. You can also [enable or disable automatic updates on an existing install](#control-automatic-updates).
> 💡 Looking to reinstall the Netdata Agent to enable a feature, update an Agent that cannot update automatically, or
-> troubleshoot an error during the installation process? See our [reinstallation doc](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md)
+> troubleshoot an error during the installation process? See our [reinstallation doc](/packaging/installer/REINSTALL.md)
> for reinstallation steps.
Before you update the Netdata Agent, check to see if your Netdata Agent is already up-to-date by clicking on the update
@@ -52,7 +52,7 @@ installation script in dry-run mode to attempt to determine what method to use t
command:
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --dry-run
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --dry-run
```
Note that if you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option
@@ -75,7 +75,7 @@ If you installed Netdata using an installation prefix, you will need to add an `
that prefix to this command to make sure it finds Netdata.
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh
```
### Issues with older binpkg installs
@@ -91,13 +91,13 @@ On such installs, you can update Netdata using your distribution package manager
The update process outlined above suffers from the same issues that installing on hosts without IPv4
connectivity does, and requires similar workarounds. For more details check [the explanation in our install
-documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#installs-on-hosts-without-ipv4-connectivity).
+documentation](/packaging/installer/README.md#installs-on-hosts-without-ipv4-connectivity).
### If the kickstart script does not work
If the above command fails, you can [reinstall
-Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md#one-line-installer-script-kickstartsh) to get the latest version. This
-also preserves your [configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) in `netdata.conf` or other files just like updating
+Netdata](/packaging/installer/REINSTALL.md#one-line-installer-script-kickstartsh) to get the latest version. This
+also preserves your [configuration](/docs/netdata-agent/configuration/README.md) in `netdata.conf` or other files just like updating
normally would, though you will need to specify any installation options you used originally again.
## Docker
@@ -121,7 +121,7 @@ docker rm netdata
```
You can now re-create your Netdata container using the `docker` command or a `docker-compose.yml` file. See our [Docker
-installation instructions](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.
+installation instructions](/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.
## macOS
@@ -132,7 +132,7 @@ brew upgrade netdata
```
Homebrew downloads the latest Netdata via the
-[formulae](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb), ensures all dependencies are met,
+[formulae](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb), ensures all dependencies are met,
and updates Netdata via reinstallation.
If you instead installed Netdata using our one-line installation script, you can use our [regular update
@@ -140,7 +140,7 @@ instructions](#updates-for-most-systems) to update Netdata.
## Manual installation from Git
-If you installed [Netdata manually from Git](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md), you can run that installer again
+If you installed [Netdata manually from Git](/packaging/installer/methods/manual.md), you can run that installer again
to update your agent. First, run our automatic requirements installer, which works on many Linux distributions, to
ensure your system has the dependencies necessary for new features.
@@ -196,7 +196,7 @@ located in the same directory as the main `netdata.conf` file. This file uses PO
variables that are used by the updater.
This configuration file can be edited [using our `edit-config`
-script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md).
+script](/docs/netdata-agent/configuration/README.md).
The following configuration options are currently supported:
@@ -204,6 +204,8 @@ The following configuration options are currently supported:
as a scheduled task. This random delay helps avoid issues resulting from too many nodes trying to reconnect to
the Cloud at the same time. The default value is 3600, which corresponds to one hour. Most users should not ever
need to change this.
+- `NETDATA_MAJOR_VERSION_UPDATES`: If set to a value other than 0, new major versions will be installed
+ without user confirmation; automated updates will not install new major versions unless this is set (see the
+ sketch after this list).
- `NETDATA_NO_SYSTEMD_JOURNAL`: If set to a value other than 0, skip attempting to install the
`netdata-plugin-systemd-journal` package on supported systems on update. This optional package will be installed
by default on supported systems by the updater if this option is not set. Only affects systems using native packages.
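+
+As a minimal sketch, enabling unattended major-version updates in the updater configuration file described
+above (commonly `netdata-updater.conf`; the exact path depends on your install) could look like this:
+
+```sh
+# Allow scheduled updates to install new major versions without confirmation
+NETDATA_MAJOR_VERSION_UPDATES="1"
+```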
diff --git a/packaging/installer/dependencies/alpine.sh b/packaging/installer/dependencies/alpine.sh
index ee0504b34..77a6fc828 100755
--- a/packaging/installer/dependencies/alpine.sh
+++ b/packaging/installer/dependencies/alpine.sh
@@ -9,32 +9,27 @@ DONT_WAIT=0
package_tree="
alpine-sdk
- git
- gcc
- g++
- automake
- autoconf
cmake
- make
- libatomic
- libtool
- pkgconfig
- tar
+ coreutils
curl
+ elfutils-dev
+ g++
+ gcc
+ git
gzip
+ json-c-dev
+ libatomic
+ libmnl-dev
libuv-dev
lz4-dev
+ make
openssl-dev
- elfutils-dev
+ pkgconfig
python3
- zlib-dev
+ tar
util-linux-dev
- libmnl-dev
- json-c-dev
- musl-fts-dev
- bison
- flex
yaml-dev
+ zlib-dev
"
usage() {
diff --git a/packaging/installer/dependencies/arch.sh b/packaging/installer/dependencies/arch.sh
index 30be834be..7b0c9c54f 100755
--- a/packaging/installer/dependencies/arch.sh
+++ b/packaging/installer/dependencies/arch.sh
@@ -8,32 +8,25 @@ NON_INTERACTIVE=0
DONT_WAIT=0
declare -a package_tree=(
- gcc
- make
- autoconf
- autoconf-archive
- autogen
- automake
- libtool
+ binutils
cmake
- zlib
- util-linux
- libmnl
+ curl
+ gcc
+ git
+ gzip
json-c
- libyaml
+ libelf
+ libmnl
libuv
+ libyaml
lz4
+ make
openssl
- libelf
- git
pkgconfig
- tar
- curl
- gzip
python3
- binutils
- bison
- flex
+ tar
+ util-linux
+ zlib
)
usage() {
diff --git a/packaging/installer/dependencies/centos.sh b/packaging/installer/dependencies/centos.sh
index 532a0a71e..b647b2304 100755
--- a/packaging/installer/dependencies/centos.sh
+++ b/packaging/installer/dependencies/centos.sh
@@ -5,15 +5,9 @@
set -e
declare -a package_tree=(
- autoconf
- autoconf-archive
- automake
- bison
cmake
- cmake3
curl
elfutils-libelf-devel
- flex
findutils
gcc
gcc-c++
@@ -22,7 +16,6 @@ declare -a package_tree=(
json-c-devel
libatomic
libmnl-devel
- libtool
libuuid-devel
libuv-devel
libyaml-devel
diff --git a/packaging/installer/dependencies/debian.sh b/packaging/installer/dependencies/debian.sh
index 692a71191..099963afe 100755
--- a/packaging/installer/dependencies/debian.sh
+++ b/packaging/installer/dependencies/debian.sh
@@ -8,14 +8,8 @@ NON_INTERACTIVE=0
DONT_WAIT=0
package_tree="
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
- flex
g++
gcc
git
@@ -27,12 +21,10 @@ package_tree="
libmnl-dev
libssl-dev
libsystemd-dev
- libtool
libuv1-dev
libyaml-dev
make
pkg-config
- python
python3
tar
uuid-dev
diff --git a/packaging/installer/dependencies/fedora.sh b/packaging/installer/dependencies/fedora.sh
index fc30b6113..151746377 100755
--- a/packaging/installer/dependencies/fedora.sh
+++ b/packaging/installer/dependencies/fedora.sh
@@ -17,23 +17,11 @@ os_version() {
fi
}
-if [[ $(os_version) -gt 24 ]]; then
- ulogd_pkg=
-else
- ulogd_pkg=ulogd
-fi
-
declare -a package_tree=(
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
elfutils-libelf-devel
findutils
- flex
gcc
gcc-c++
git
@@ -41,7 +29,6 @@ declare -a package_tree=(
json-c-devel
libatomic
libmnl-devel
- libtool
libuuid-devel
libuv-devel
libyaml-devel
@@ -53,7 +40,6 @@ declare -a package_tree=(
systemd-devel
tar
zlib-devel
- "${ulogd_pkg}"
)
usage() {
diff --git a/packaging/installer/dependencies/freebsd.sh b/packaging/installer/dependencies/freebsd.sh
index eadbcfa98..91fd2959c 100755
--- a/packaging/installer/dependencies/freebsd.sh
+++ b/packaging/installer/dependencies/freebsd.sh
@@ -8,26 +8,19 @@ NON_INTERACTIVE=0
DONT_WAIT=0
package_tree="
- git
- autoconf
- autoconf-archive
- autogen
- automake
- libtool
- pkgconf
cmake
curl
- gzip
- lzlib
e2fsprogs-libuuid
+ git
+ gzip
json-c
- libyaml
- libuv
liblz4
+ libuv
+ libyaml
+ lzlib
openssl
+ pkgconf
python3
- bison
- flex
"
prompt() {
diff --git a/packaging/installer/dependencies/gentoo.sh b/packaging/installer/dependencies/gentoo.sh
index 9cf7f281a..58e805a2d 100755
--- a/packaging/installer/dependencies/gentoo.sh
+++ b/packaging/installer/dependencies/gentoo.sh
@@ -8,32 +8,26 @@ NON_INTERACTIVE=0
DONT_WAIT=0
package_tree="
+ app-alternatives/gzip
+ app-alternatives/tar
+ app-arch/lz4
+ dev-lang/python
+ dev-libs/json-c
+ dev-libs/libuv
+ dev-libs/libyaml
+ dev-libs/openssl
+ dev-util/cmake
dev-vcs/git
+ net-libs/libmnl
+ net-misc/curl
sys-apps/findutils
+ sys-apps/util-linux
sys-devel/gcc
sys-devel/make
- sys-devel/autoconf
- sys-devel/autoconf-archive
- sys-devel/autogen
- sys-devel/automake
- virtual/pkgconfig
- dev-util/cmake
- app-arch/tar
- net-misc/curl
- app-arch/gzip
- sys-apps/util-linux
- net-libs/libmnl
- dev-libs/json-c
- dev-libs/libyaml
- dev-libs/libuv
- app-arch/lz4
- dev-libs/openssl
virtual/libelf
- dev-lang/python
- dev-libs/libuv
- sys-devel/bison
- sys-devel/flex
+ virtual/pkgconfig
"
+
usage() {
cat << EOF
OPTIONS:
diff --git a/packaging/installer/dependencies/ol.sh b/packaging/installer/dependencies/ol.sh
index 2dc10cee5..fca904a9b 100755
--- a/packaging/installer/dependencies/ol.sh
+++ b/packaging/installer/dependencies/ol.sh
@@ -8,15 +8,9 @@ NON_INTERACTIVE=0
DONT_WAIT=0
declare -a package_tree=(
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
elfutils-libelf-devel
- flex
gcc
gcc-c++
git
@@ -24,7 +18,6 @@ declare -a package_tree=(
json-c-devel
libatomic
libmnl-devel
- libtool
libuuid-devel
libuv-devel
libyaml-devel
diff --git a/packaging/installer/dependencies/opensuse.sh b/packaging/installer/dependencies/opensuse.sh
index ecf1268fc..4fba64095 100755
--- a/packaging/installer/dependencies/opensuse.sh
+++ b/packaging/installer/dependencies/opensuse.sh
@@ -10,14 +10,8 @@ NON_INTERACTIVE=0
DONT_WAIT=0
declare -a package_tree=(
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
- flex
gcc
gcc-c++
git
@@ -28,7 +22,6 @@ declare -a package_tree=(
liblz4-devel
libmnl-devel
libopenssl-devel
- libtool
libuuid-devel
libuv-devel
libyaml-devel
diff --git a/packaging/installer/dependencies/rockylinux.sh b/packaging/installer/dependencies/rockylinux.sh
index cc8d45204..921fd29bf 100755
--- a/packaging/installer/dependencies/rockylinux.sh
+++ b/packaging/installer/dependencies/rockylinux.sh
@@ -8,16 +8,10 @@ NON_INTERACTIVE=0
DONT_WAIT=0
declare -a package_tree=(
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
elfutils-libelf-devel
findutils
- flex
gcc
gcc-c++
git
@@ -25,7 +19,6 @@ declare -a package_tree=(
json-c-devel
libatomic
libmnl-devel
- libtool
libuuid-devel
libuv-devel
libyaml-devel
diff --git a/packaging/installer/dependencies/ubuntu.sh b/packaging/installer/dependencies/ubuntu.sh
index e223ca384..c6e5a0b36 100755
--- a/packaging/installer/dependencies/ubuntu.sh
+++ b/packaging/installer/dependencies/ubuntu.sh
@@ -8,14 +8,8 @@ NON_INTERACTIVE=0
DONT_WAIT=0
package_tree="
- autoconf
- autoconf-archive
- autogen
- automake
- bison
cmake
curl
- flex
g++
gcc
git
@@ -27,7 +21,6 @@ package_tree="
libmnl-dev
libssl-dev
libsystemd-dev
- libtool
libuv1-dev
libyaml-dev
make
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh
index dd3158d6d..c339ac87c 100644
--- a/packaging/installer/functions.sh
+++ b/packaging/installer/functions.sh
@@ -103,13 +103,33 @@ check_for_curl() {
get() {
url="${1}"
+ checked=0
+ succeeded=0
check_for_curl
if [ -n "${curl}" ]; then
- "${curl}" -q -o - -sSL --connect-timeout 10 --retry 3 "${url}"
- elif command -v wget > /dev/null 2>&1; then
- wget -T 15 -O - "${url}"
+ checked=1
+
+ if "${curl}" -q -o - -sSL --connect-timeout 10 --retry 3 "${url}"; then
+ succeeded=1
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if wget -T 15 -O - "${url}"; then
+ succeeded=1
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
fatal "I need curl or wget to proceed, but neither is available on this system." "L0002"
fi
@@ -124,9 +144,29 @@ download_file() {
check_for_curl
if [ -n "${curl}" ]; then
- run "${curl}" -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"
- elif command -v wget > /dev/null 2>&1; then
- run wget -T 15 -O "${dest}" "${url}"
+ checked=1
+
+ if run "${curl}" -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"; then
+ succeeded=1
+ else
+ rm -f "${dest}"
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if run wget -T 15 -O "${dest}" "${url}"; then
+ succeeded=1
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
echo >&2
echo >&2 "Downloading ${name} from '${url}' failed because of missing mandatory packages."
@@ -192,6 +232,131 @@ netdata_banner() {
}
# -----------------------------------------------------------------------------
+# Feature management and configuration commands
+
+enable_feature() {
+ NETDATA_CMAKE_OPTIONS="$(echo "${NETDATA_CMAKE_OPTIONS}" | sed -e "s/-DENABLE_${1}=Off[[:space:]]*//g" -e "s/-DENABLE_${1}=On[[:space:]]*//g")"
+ if [ "${2}" -eq 1 ]; then
+ NETDATA_CMAKE_OPTIONS="$(echo "${NETDATA_CMAKE_OPTIONS}" | sed "s/$/ -DENABLE_${1}=On/")"
+ else
+ NETDATA_CMAKE_OPTIONS="$(echo "${NETDATA_CMAKE_OPTIONS}" | sed "s/$/ -DENABLE_${1}=Off/")"
+ fi
+}
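+# Example (illustrative): `enable_feature PLUGIN_GO 1` strips any existing -DENABLE_PLUGIN_GO=On/Off
+# flag and appends -DENABLE_PLUGIN_GO=On to NETDATA_CMAKE_OPTIONS.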
+
+check_for_module() {
+ if [ -z "${pkgconf}" ]; then
+ pkgconf="$(command -v pkgconf 2>/dev/null)"
+ [ -z "${pkgconf}" ] && pkgconf="$(command -v pkg-config 2>/dev/null)"
+ [ -z "${pkgconf}" ] && fatal "Unable to find a usable pkgconf/pkg-config command, cannot build Netdata." I0013
+ fi
+
+ "${pkgconf}" "${1}"
+ return "${?}"
+}
+
+check_for_feature() {
+ feature_name="${1}"
+ feature_state="${2}"
+ shift 2
+ feature_modules="${*}"
+
+ if [ -z "${feature_state}" ]; then
+ # shellcheck disable=SC2086
+ if check_for_module ${feature_modules}; then
+ enable_feature "${feature_name}" 1
+ else
+ enable_feature "${feature_name}" 0
+ fi
+ else
+ enable_feature "${feature_name}" "${feature_state}"
+ fi
+}
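
Reviewer note: a rough worked example of how these helpers rewrite `NETDATA_CMAKE_OPTIONS` (values are illustrative, not taken from the patch):

```sh
NETDATA_CMAKE_OPTIONS="-S ./ -B build"

enable_feature PLUGIN_GO 1
# -> "-S ./ -B build -DENABLE_PLUGIN_GO=On"

enable_feature PLUGIN_GO 0
# any existing -DENABLE_PLUGIN_GO=... is stripped first, then Off is appended:
# -> "-S ./ -B build -DENABLE_PLUGIN_GO=Off"

# With an empty state, check_for_feature falls back to a pkg-config probe and
# enables or disables the feature based on the result:
check_for_feature PLUGIN_FREEIPMI "" libipmimonitoring
# -> appends -DENABLE_PLUGIN_FREEIPMI=On or =Off depending on the probe
```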
+
+prepare_cmake_options() {
+ NETDATA_CMAKE_OPTIONS="-S ./ -B ${NETDATA_BUILD_DIR} ${CMAKE_OPTS} ${NETDATA_PREFIX+-DCMAKE_INSTALL_PREFIX="${NETDATA_PREFIX}"} ${NETDATA_USER:+-DNETDATA_USER=${NETDATA_USER}} ${NETDATA_CMAKE_OPTIONS} "
+
+ NEED_OLD_CXX=0
+
+ if [ "${FORCE_LEGACY_CXX:-0}" -eq 1 ]; then
+ NEED_OLD_CXX=1
+ else
+ if command -v gcc >/dev/null 2>&1; then
+ if [ "$(gcc --version | head -n 1 | sed 's/(.*) //' | cut -f 2 -d ' ' | cut -f 1 -d '.')" -lt 5 ]; then
+ NEED_OLD_CXX=1
+ fi
+ fi
+
+ if command -v clang >/dev/null 2>&1; then
+ if [ "$(clang --version | head -n 1 | cut -f 3 -d ' ' | cut -f 1 -d '.')" -lt 4 ]; then
+ NEED_OLD_CXX=1
+ fi
+ fi
+ fi
+
+ if [ "${NEED_OLD_CXX}" -eq 1 ]; then
+ NETDATA_CMAKE_OPTIONS="${NETDATA_CMAKE_OPTIONS} -DUSE_CXX_11=On"
+ fi
+
+ if [ "${ENABLE_GO:-1}" -eq 1 ]; then
+ enable_feature PLUGIN_GO 1
+ else
+ enable_feature PLUGIN_GO 0
+ fi
+
+ if [ "${USE_SYSTEM_PROTOBUF:-0}" -eq 1 ]; then
+ enable_feature BUNDLED_PROTOBUF 0
+ else
+ enable_feature BUNDLED_PROTOBUF 1
+ fi
+
+ if [ -z "${ENABLE_SYSTEMD_JOURNAL}" ]; then
+ if check_for_module libsystemd; then
+ if check_for_module libelogind; then
+ ENABLE_SYSTEMD_JOURNAL=0
+ else
+ ENABLE_SYSTEMD_JOURNAL=1
+ fi
+ else
+ ENABLE_SYSTEMD_JOURNAL=0
+ fi
+ fi
+
+ enable_feature PLUGIN_SYSTEMD_JOURNAL "${ENABLE_SYSTEMD_JOURNAL}"
+
+ if command -v cups-config >/dev/null 2>&1 || check_for_module libcups || check_for_module cups; then
+ ENABLE_CUPS=1
+ else
+ ENABLE_CUPS=0
+ fi
+
+ enable_feature PLUGIN_CUPS "${ENABLE_CUPS}"
+
+ IS_LINUX=0
+ [ "$(uname -s)" = "Linux" ] && IS_LINUX=1
+ enable_feature PLUGIN_DEBUGFS "${IS_LINUX}"
+ enable_feature PLUGIN_PERF "${IS_LINUX}"
+ enable_feature PLUGIN_SLABINFO "${IS_LINUX}"
+ enable_feature PLUGIN_CGROUP_NETWORK "${IS_LINUX}"
+ enable_feature PLUGIN_LOCAL_LISTENERS "${IS_LINUX}"
+ enable_feature PLUGIN_NETWORK_VIEWER "${IS_LINUX}"
+ enable_feature PLUGIN_EBPF "${ENABLE_EBPF:-0}"
+
+ enable_feature ACLK "${ENABLE_CLOUD:-1}"
+ enable_feature CLOUD "${ENABLE_CLOUD:-1}"
+ enable_feature BUNDLED_JSONC "${NETDATA_BUILD_JSON_C:-0}"
+ enable_feature DBENGINE "${ENABLE_DBENGINE:-1}"
+ enable_feature H2O "${ENABLE_H2O:-1}"
+ enable_feature ML "${NETDATA_ENABLE_ML:-1}"
+ enable_feature PLUGIN_APPS "${ENABLE_APPS:-1}"
+
+ check_for_feature EXPORTER_PROMETHEUS_REMOTE_WRITE "${EXPORTER_PROMETHEUS}" snappy
+ check_for_feature EXPORTER_MONGODB "${EXPORTER_MONGODB}" libmongoc-1.0
+ check_for_feature PLUGIN_FREEIPMI "${ENABLE_FREEIPMI}" libipmimonitoring
+ check_for_feature PLUGIN_NFACCT "${ENABLE_NFACCT}" libnetfilter_acct libmnl
+ check_for_feature PLUGIN_XENSTAT "${ENABLE_XENSTAT}" xenstat xenlight
+}
+
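Reviewer note: presumably (not shown in this hunk) the installer then hands the assembled option string to CMake, roughly along these lines; this is a hedged sketch, not the patch's own invocation:

```sh
prepare_cmake_options
# shellcheck disable=SC2086
cmake ${NETDATA_CMAKE_OPTIONS}
cmake --build "${NETDATA_BUILD_DIR}" --parallel
```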
+# -----------------------------------------------------------------------------
# portable service command
service_cmd="$(command -v service 2> /dev/null || true)"
@@ -463,45 +628,6 @@ get_systemd_service_dir() {
fi
}
-install_non_systemd_init() {
- [ "${UID}" != 0 ] && return 1
- key="$(get_os_key)"
-
- if [ -d /etc/init.d ] && [ ! -f /etc/init.d/netdata ]; then
- if expr "${key}" : "^(gentoo|alpine).*"; then
- echo >&2 "Installing OpenRC init file..."
- run cp system/openrc/init.d/netdata /etc/init.d/netdata &&
- run chmod 755 /etc/init.d/netdata &&
- run rc-update add netdata default &&
- return 0
-
- elif expr "${key}" : "^devuan*" || [ "${key}" = "debian-7" ] || [ "${key}" = "ubuntu-12.04" ] || [ "${key}" = "ubuntu-14.04" ]; then
- echo >&2 "Installing LSB init file..."
- run cp system/lsb/init.d/netdata /etc/init.d/netdata &&
- run chmod 755 /etc/init.d/netdata &&
- run update-rc.d netdata defaults &&
- run update-rc.d netdata enable &&
- return 0
- elif expr "${key}" : "^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).*"; then
- echo >&2 "Installing init.d file..."
- run cp system/initd/init.d/netdata /etc/init.d/netdata &&
- run chmod 755 /etc/init.d/netdata &&
- run chkconfig netdata on &&
- return 0
- else
- warning "Could not determine what type of init script to install on this system."
- return 1
- fi
- elif [ -f /etc/init.d/netdata ]; then
- echo >&2 "file '/etc/init.d/netdata' already exists."
- return 0
- else
- warning "Could not determine what type of init script to install on this system."
- fi
-
- return 1
-}
-
run_install_service_script() {
if [ -z "${tmpdir}" ]; then
tmpdir="${TMPDIR:-/tmp}"
@@ -565,90 +691,7 @@ install_netdata_service() {
if [ -x "${NETDATA_PREFIX}/usr/libexec/netdata/install-service.sh" ]; then
run_install_service_script && return 0
else
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- NETDATA_STOP_CMD="netdatacli shutdown-agent"
-
- NETDATA_START_CMD="netdata"
- NETDATA_INSTALLER_START_CMD=""
-
- uname="$(uname 2> /dev/null)"
-
- if [ "${uname}" = "Darwin" ]; then
- if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ]; then
- echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists."
- return 0
- else
- echo >&2 "Installing MacOS X plist file..."
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- run cp system/launchd/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist &&
- run launchctl load /Library/LaunchDaemons/com.github.netdata.plist &&
- NETDATA_START_CMD="launchctl start com.github.netdata" &&
- NETDATA_STOP_CMD="launchctl stop com.github.netdata"
- return 0
- fi
-
- elif [ "${uname}" = "FreeBSD" ]; then
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- run cp system/freebsd/rc.d/netdata /etc/rc.d/netdata && NETDATA_START_CMD="service netdata start" &&
- NETDATA_STOP_CMD="service netdata stop" &&
- NETDATA_INSTALLER_START_CMD="service netdata onestart" &&
- myret=$?
-
- echo >&2 "Note: To explicitly enable netdata automatic start, set 'netdata_enable' to 'YES' in /etc/rc.conf"
- echo >&2 ""
-
- return "${myret}"
-
- elif issystemd; then
- # systemd is running on this system
- NETDATA_START_CMD="systemctl start netdata"
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- NETDATA_STOP_CMD="systemctl stop netdata"
- NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
-
- SYSTEMD_DIRECTORY="$(get_systemd_service_dir)"
-
- if [ "${SYSTEMD_DIRECTORY}x" != "x" ]; then
- ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="run systemctl enable netdata"
- IS_NETDATA_ENABLED="$(systemctl is-enabled netdata 2> /dev/null || echo "Netdata not there")"
- if [ "${IS_NETDATA_ENABLED}" = "disabled" ]; then
- echo >&2 "Netdata was there and disabled, make sure we don't re-enable it ourselves"
- ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="true"
- fi
-
- echo >&2 "Installing systemd service..."
- run cp system/systemd/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" &&
- run systemctl daemon-reload &&
- ${ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED} &&
- return 0
- else
- warning "Could not find a systemd service directory, unable to install Netdata systemd service."
- fi
- else
- install_non_systemd_init
- ret=$?
-
- if [ ${ret} -eq 0 ]; then
- if [ -n "${service_cmd}" ]; then
- NETDATA_START_CMD="service netdata start"
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- NETDATA_STOP_CMD="service netdata stop"
- elif [ -n "${rcservice_cmd}" ]; then
- NETDATA_START_CMD="rc-service netdata start"
- # This is used by netdata-installer.sh
- # shellcheck disable=SC2034
- NETDATA_STOP_CMD="rc-service netdata stop"
- fi
- NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
- fi
-
- return ${ret}
- fi
+ warning "Could not find service install script, not installing Netdata as a system service."
fi
fi
@@ -865,6 +908,28 @@ install_netdata_logrotate() {
}
# -----------------------------------------------------------------------------
+# install netdata journald configuration
+
+install_netdata_journald_conf() {
+ src="${NETDATA_PREFIX}/usr/lib/netdata/system/systemd/journald@netdata.conf"
+
+ [ ! -d /usr/lib/systemd/ ] && return 0
+ [ "${UID}" -ne 0 ] && return 1
+
+ if [ ! -d /usr/lib/systemd/journald@netdata.conf.d/ ]; then
+ run mkdir /usr/lib/systemd/journald@netdata.conf.d/
+ fi
+
+ run cp "${src}" /usr/lib/systemd/journald@netdata.conf.d/netdata.conf
+
+ if [ -f /usr/lib/systemd/journald@netdata.conf.d/netdata.conf ]; then
+ run chmod 644 /usr/lib/systemd/journald@netdata.conf.d/netdata.conf
+ fi
+
+ return 0
+}
+
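Reviewer note: a quick way to confirm the drop-in from `install_netdata_journald_conf()` landed where journald's `netdata` namespace expects it (illustrative command, not part of the patch):

```sh
ls -l /usr/lib/systemd/journald@netdata.conf.d/netdata.conf
```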
+# -----------------------------------------------------------------------------
# create netdata.conf
create_netdata_conf() {
@@ -928,6 +993,11 @@ portable_add_user() {
echo >&2 "User '${username}' already exists."
return 0
fi
+ elif command -v dscl > /dev/null 2>&1; then
+ if dscl . read /Users/"${username}" >/dev/null 2>&1; then
+ echo >&2 "User '${username}' already exists."
+ return 0
+ fi
else
if cut -d ':' -f 1 < /etc/passwd | grep "^${username}$" 1> /dev/null 2>&1; then
echo >&2 "User '${username}' already exists."
@@ -946,7 +1016,13 @@ portable_add_user() {
elif command -v adduser 1> /dev/null 2>&1; then
run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0
elif command -v sysadminctl 1> /dev/null 2>&1; then
- run sysadminctl -addUser "${username}" && return 0
+ gid=$(dscl . read /Groups/"${username}" 2>/dev/null | grep PrimaryGroupID | grep -Eo "[0-9]+")
+ if run sysadminctl -addUser "${username}" -shell /usr/bin/false -home /var/empty -GID "$gid"; then
+ # FIXME: I think the proper solution is to create a role account:
+ # -roleAccount + name starting with _ and UID in 200-400 range.
+ run dscl . create /Users/"${username}" IsHidden 1
+ return 0
+ fi
fi
warning "Failed to add ${username} user account!"
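
Reviewer note: the FIXME above refers to macOS role accounts; a hedged sketch of that alternative (flags as documented by `sysadminctl`; the exact name and UID are assumptions):

```sh
# Role account names must start with an underscore and use a UID in the 200-400 range.
sysadminctl -addUser _netdata -roleAccount -UID 300 \
  -shell /usr/bin/false -home /var/empty
```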
@@ -1061,9 +1137,11 @@ install_netdata_updater() {
cat "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
fi
- if issystemd && [ -n "$(get_systemd_service_dir)" ]; then
- cat "${NETDATA_SOURCE_DIR}/system/systemd/netdata-updater.timer" > "$(get_systemd_service_dir)/netdata-updater.timer"
- cat "${NETDATA_SOURCE_DIR}/system/systemd/netdata-updater.service" > "$(get_systemd_service_dir)/netdata-updater.service"
+ # these files are installed by cmake
+ libsysdir="${NETDATA_PREFIX}/usr/lib/netdata/system/systemd/"
+ if [ -d "${libsysdir}" ] && issystemd && [ -n "$(get_systemd_service_dir)" ]; then
+ cat "${libsysdir}/netdata-updater.timer" > "$(get_systemd_service_dir)/netdata-updater.timer"
+ cat "${libsysdir}/netdata-updater.service" > "$(get_systemd_service_dir)/netdata-updater.service"
fi
sed -i -e "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh
index bdd529397..e97902026 100755
--- a/packaging/installer/install-required-packages.sh
+++ b/packaging/installer/install-required-packages.sh
@@ -471,7 +471,7 @@ detect_package_manager_from_distribution() {
package_installer="install_brew"
tree="macos"
if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${brew}" ]; then
- echo >&2 "command 'brew' is required to install packages on a '${distribution} ${version}' system."
+ echo >&2 "command 'brew' is required to install packages on a '${distribution} ${version}' system. Get instructions at https://brew.sh/"
exit 1
fi
;;
@@ -623,77 +623,21 @@ declare -A pkg_find=(
declare -A pkg_distro_sdk=(
['alpine']="alpine-sdk"
+ ['centos']="kernel-headers"
['default']="NOTREQUIRED"
)
-declare -A pkg_autoconf=(
- ['gentoo']="sys-devel/autoconf"
- ['clearlinux']="c-basic"
- ['default']="autoconf"
-)
-
-# required to compile netdata with --enable-sse
-# https://github.com/firehol/netdata/pull/450
-declare -A pkg_autoconf_archive=(
- ['gentoo']="sys-devel/autoconf-archive"
- ['clearlinux']="c-basic"
- ['alpine']="WARNING|"
- ['default']="autoconf-archive"
-
- # exceptions
- ['centos-6']="WARNING|"
- ['rhel-6']="WARNING|"
- ['rhel-7']="WARNING|"
-)
-
-declare -A pkg_autogen=(
- ['gentoo']="sys-devel/autogen"
- ['clearlinux']="c-basic"
- ['alpine']="WARNING|"
- ['default']="autogen"
-
- # exceptions
- ['centos-6']="WARNING|"
- ['rhel-6']="WARNING|"
- ['centos-9']="NOTREQUIRED|"
- ['rhel-9']="NOTREQUIRED|"
-)
-
-declare -A pkg_automake=(
- ['gentoo']="sys-devel/automake"
- ['clearlinux']="c-basic"
- ['default']="automake"
+declare -A pkg_coreutils=(
+ ['alpine']="coreutils"
+ ['default']="NOTREQUIRED"
)
-# Required to build libwebsockets and libmosquitto on some systems.
declare -A pkg_cmake=(
['gentoo']="dev-util/cmake"
['clearlinux']="c-basic"
['default']="cmake"
)
-# bison and flex are required by Fluent-Bit
-declare -A pkg_bison=(
- ['default']="bison"
-)
-
-declare -A pkg_flex=(
- ['default']="flex"
-)
-
-# fts-dev is required by Fluent-Bit on Alpine
-declare -A pkg_fts_dev=(
- ['default']="NOTREQUIRED"
- ['alpine']="musl-fts-dev"
- ['alpine-3.16.9']="fts-dev"
-)
-
-# cmake3 is required by Fluent-Bit on CentOS 7
-declare -A pkg_cmake3=(
- ['default']="NOTREQUIRED"
- ['centos-7']="cmake3"
-)
-
declare -A pkg_json_c_dev=(
['alpine']="json-c-dev"
['arch']="json-c"
@@ -747,6 +691,11 @@ declare -A pkg_libsystemd_dev=(
['default']="systemd-devel"
)
+declare -A pkg_pcre2=(
+ ['macos']="pcre2"
+ ['default']="NOTREQUIRED"
+)
+
declare -A pkg_bridge_utils=(
['gentoo']="net-misc/bridge-utils"
['clearlinux']="network-basic"
@@ -761,13 +710,13 @@ declare -A pkg_curl=(
)
declare -A pkg_gzip=(
- ['gentoo']="app-arch/gzip"
+ ['gentoo']="app-alternatives/gzip"
['macos']="NOTREQUIRED"
['default']="gzip"
)
declare -A pkg_tar=(
- ['gentoo']="app-arch/tar"
+ ['gentoo']="app-alternatives/tar"
['clearlinux']="os-core-update"
['macos']="NOTREQUIRED"
['freebsd']="NOTREQUIRED"
@@ -946,7 +895,7 @@ declare -A pkg_postfix=(
)
declare -A pkg_pkg_config=(
- ['alpine']="pkgconfig"
+ ['alpine']="pkgconf"
['arch']="pkgconfig"
['centos']="pkgconfig"
['debian']="pkg-config"
@@ -1228,6 +1177,7 @@ packages() {
# basic build environment
suitable_package distro-sdk
+ suitable_package coreutils
suitable_package libatomic
require_cmd git || suitable_package git
@@ -1237,14 +1187,9 @@ packages() {
require_cmd gcc-multilib || suitable_package gcc
require_cmd g++ || require_cmd clang++ || suitable_package gxx
- require_cmd make || suitable_package make
- require_cmd autoconf || suitable_package autoconf
- suitable_package autoconf-archive
- require_cmd autogen || suitable_package autogen
- require_cmd automake || suitable_package automake
require_cmd pkg-config || suitable_package pkg-config
require_cmd cmake || suitable_package cmake
- require_cmd cmake3 || suitable_package cmake3
+ require_cmd make || suitable_package make
# -------------------------------------------------------------------------
# debugging tools for development
@@ -1267,8 +1212,6 @@ packages() {
require_cmd tar || suitable_package tar
require_cmd curl || suitable_package curl
require_cmd gzip || suitable_package gzip
- require_cmd bison || suitable_package bison
- require_cmd flex || suitable_package flex
fi
# -------------------------------------------------------------------------
@@ -1300,9 +1243,9 @@ packages() {
suitable_package libuuid-dev
suitable_package libmnl-dev
suitable_package json-c-dev
- suitable_package fts-dev
suitable_package libyaml-dev
suitable_package libsystemd-dev
+ suitable_package pcre2
fi
# -------------------------------------------------------------------------
@@ -1335,9 +1278,6 @@ packages() {
if [ "${PACKAGES_NETDATA_PYTHON}" -ne 0 ]; then
require_cmd python || suitable_package python
-
- # suitable_package python-requests
- # suitable_package python-pip
fi
# -------------------------------------------------------------------------
@@ -1345,9 +1285,6 @@ packages() {
if [ "${PACKAGES_NETDATA_PYTHON3}" -ne 0 ]; then
require_cmd python3 || suitable_package python3
-
- # suitable_package python3-requests
- # suitable_package python3-pip
fi
# -------------------------------------------------------------------------
diff --git a/packaging/installer/installer.nsi b/packaging/installer/installer.nsi
new file mode 100644
index 000000000..c14ccb599
--- /dev/null
+++ b/packaging/installer/installer.nsi
@@ -0,0 +1,128 @@
+!include "MUI2.nsh"
+!include "nsDialogs.nsh"
+!include "FileFunc.nsh"
+
+Name "Netdata"
+Outfile "netdata-installer.exe"
+InstallDir "$PROGRAMFILES\Netdata"
+RequestExecutionLevel admin
+
+!define MUI_ICON "NetdataWhite.ico"
+!define MUI_UNICON "NetdataWhite.ico"
+
+!define ND_UININSTALL_REG "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata"
+
+!define MUI_ABORTWARNING
+!define MUI_UNABORTWARNING
+
+!insertmacro MUI_PAGE_WELCOME
+!insertmacro MUI_PAGE_LICENSE "C:\msys64\gpl-3.0.txt"
+!insertmacro MUI_PAGE_DIRECTORY
+!insertmacro MUI_PAGE_INSTFILES
+!insertmacro MUI_PAGE_FINISH
+
+!insertmacro MUI_UNPAGE_CONFIRM
+!insertmacro MUI_UNPAGE_INSTFILES
+!insertmacro MUI_UNPAGE_FINISH
+
+!insertmacro MUI_LANGUAGE "English"
+
+Function .onInit
+ nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata'
+ pop $0
+ ${If} $0 == 0
+ nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata'
+ pop $0
+ ${EndIf}
+FunctionEnd
+
+Function NetdataUninstallRegistry
+ ClearErrors
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayName" "Netdata - Real-time system monitoring."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayIcon" "$INSTDIR\Uninstall.exe,0"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "UninstallString" "$INSTDIR\Uninstall.exe"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "RegOwner" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "RegCompany" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "Publisher" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "HelpLink" "https://learn.netdata.cloud/"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "URLInfoAbout" "https://www.netdata.cloud/"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayVersion" "${CURRVERSION}"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "VersionMajor" "${MAJORVERSION}"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "VersionMinor" "${MINORVERSION}"
+
+ IfErrors 0 +2
+ MessageBox MB_ICONEXCLAMATION|MB_OK "Unable to create an entry in the Control Panel!" IDOK end
+
+ ClearErrors
+ ${GetSize} "$INSTDIR" "/S=0K" $0 $1 $2
+ IntFmt $0 "0x%08X" $0
+ WriteRegDWORD HKLM "${ND_UININSTALL_REG}" "EstimatedSize" "$0"
+
+ IfErrors 0 +2
+ MessageBox MB_ICONEXCLAMATION|MB_OK "Cannot estimate the installation size." IDOK end
+ end:
+FunctionEnd
+
+Section "Install Netdata"
+ SetOutPath $INSTDIR
+ SetCompress off
+
+ File /r "C:\msys64\opt\netdata\*.*"
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe create Netdata binPath= "$INSTDIR\usr\bin\netdata.exe" start= delayed-auto'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to create Netdata service."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe description Netdata "Real-time system monitoring service"'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to add Netdata service description."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe start Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to start Netdata service."
+ ${EndIf}
+
+ WriteUninstaller "$INSTDIR\Uninstall.exe"
+
+ Call NetdataUninstallRegistry
+SectionEnd
+
+Section "Uninstall"
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to stop Netdata service."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to delete Netdata service."
+ ${EndIf}
+
+ RMDir /r "$INSTDIR"
+
+ DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata"
+SectionEnd
+
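Reviewer note: the script above references `CURRVERSION`, `MAJORVERSION`, and `MINORVERSION` without defining them, so they are presumably supplied at build time. A hedged sketch of such an invocation (version numbers are illustrative; the actual CI command is not part of this patch):

```sh
makensis -DCURRVERSION=1.45.0 -DMAJORVERSION=1 -DMINORVERSION=45 \
  packaging/installer/installer.nsi
```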
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
index f7c078274..72b82be26 100755
--- a/packaging/installer/kickstart.sh
+++ b/packaging/installer/kickstart.sh
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
-# Next unused error code: F0516
+# Next unused error code: F051B
# ======================================================================
# Constants
@@ -21,10 +21,10 @@ KICKSTART_SOURCE="$(
)"
DEFAULT_PLUGIN_PACKAGES=""
PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
-REPOCONFIG_DEB_VERSION="2-1"
-REPOCONFIG_RPM_VERSION="2-1"
+REPOCONFIG_DEB_VERSION="3-2"
+REPOCONFIG_RPM_VERSION="3-2"
START_TIME="$(date +%s)"
-STATIC_INSTALL_ARCHES="x86_64 armv7l aarch64 ppc64le"
+STATIC_INSTALL_ARCHES="x86_64 armv7l armv6l aarch64 ppc64le"
# ======================================================================
# URLs used throughout the script
@@ -125,6 +125,7 @@ main() {
;;
esac
+ handle_existing_install
set_tmpdir
if [ -n "${INSTALL_VERSION}" ]; then
@@ -195,6 +196,7 @@ USAGE: kickstart.sh [options]
--no-cleanup Don't do any cleanup steps. This is intended to help with debugging the installer.
--local-build-options Specify additional options to pass to the installer code when building locally. Only valid if --build-only is also specified.
--static-install-options Specify additional options to pass to the static installer code. Only valid if --static-only is also specified.
+ --offline-architecture Limit an offline install source being prepared with --prepare-offline-install-source to only include the specified static build architecture.
The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install:
@@ -310,23 +312,31 @@ telemetry_event() {
EOF
)"
+ succeeded=0
+
if [ -n "${CURL}" ]; then
- "${CURL}" --silent -o /dev/null -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" "${TELEMETRY_URL}" > /dev/null
- elif command -v wget > /dev/null 2>&1; then
- if wget --help 2>&1 | grep BusyBox > /dev/null 2>&1; then
- # BusyBox-compatible version of wget, there is no --no-check-certificate option
- wget -q -O - \
- -T 1 \
- --header 'Content-Type: application/json' \
- --post-data "${REQ_BODY}" \
- "${TELEMETRY_URL}" > /dev/null
- else
- wget -q -O - --no-check-certificate \
- --method POST \
- --timeout=1 \
- --header 'Content-Type: application/json' \
- --body-data "${REQ_BODY}" \
- "${TELEMETRY_URL}" > /dev/null
+ if "${CURL}" --silent -o /dev/null -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" "${TELEMETRY_URL}" > /dev/null; then
+ succeeded=1
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ if wget --help 2>&1 | grep BusyBox > /dev/null 2>&1; then
+ # BusyBox-compatible version of wget, there is no --no-check-certificate option
+ wget -q -O - \
+ -T 1 \
+ --header 'Content-Type: application/json' \
+ --post-data "${REQ_BODY}" \
+ "${TELEMETRY_URL}" > /dev/null
+ else
+ wget -q -O - --no-check-certificate \
+ --method POST \
+ --timeout=1 \
+ --header 'Content-Type: application/json' \
+ --body-data "${REQ_BODY}" \
+ "${TELEMETRY_URL}" > /dev/null
+ fi
fi
fi
}
@@ -367,6 +377,17 @@ trap 'trap_handler 15 0' TERM
# ======================================================================
# Utility functions
+canonical_path() {
+ OLDPWD="$(pwd)"
+ cd "$(dirname "${1}")" || exit 1
+ case "$(basename "${1}")" in
+ ..) dirname "$(pwd -P)" ;;
+ .) pwd -P ;;
+ *) echo "$(pwd -P)/$(basename "${1}")" ;;
+ esac
+ cd "${OLDPWD}" || exit 1
+}
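
Reviewer note: illustrative behaviour of `canonical_path` — it resolves the directory portion physically via `pwd -P` and keeps the final component as-is; the paths below are hypothetical:

```sh
# If /usr/local/bin is a symlink to /opt/netdata/bin:
canonical_path /usr/local/bin/netdata   # -> /opt/netdata/bin/netdata
canonical_path .                        # -> physical path of the current directory
canonical_path /some/dir/..             # -> parent of the physical /some/dir
```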
+
setup_terminal() {
TPUT_RESET=""
TPUT_WHITE=""
@@ -402,6 +423,9 @@ support_list() {
}
success_banner() {
+ printf >&2 "%s\n" "To view your system's real-time performance metrics, open your web browser and enter http://NODE:19999."
+ printf >&2 "%s\n\n" "Replace NODE with the IP address or hostname of your Netdata server to access the dashboard."
+
printf >&2 "%s\n\n" "Official documentation can be found online at ${DOCS_URL}."
if [ -z "${CLAIM_TOKEN}" ]; then
@@ -431,7 +455,7 @@ deferred_warnings() {
fatal() {
deferred_warnings
- printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${1}"
+ printf >&2 "%b\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${1}"
printf >&2 "%s\n" "For community support, you can connect with us on:"
support_list
telemetry_event "INSTALL_FAILED" "${1}" "${2}"
@@ -530,11 +554,15 @@ run_script() {
# shellcheck disable=SC2086
run ${ROOTCMD} "${@}"
+ ret="$?"
+
if [ -r "${NETDATA_SCRIPT_STATUS_PATH}" ]; then
# shellcheck disable=SC1090
. "${NETDATA_SCRIPT_STATUS_PATH}"
rm -f "${NETDATA_SCRIPT_STATUS_PATH}"
fi
+
+ return "${ret}"
}
warning() {
@@ -588,13 +616,38 @@ set_tmpdir() {
check_for_remote_file() {
url="${1}"
+ succeeded=0
+ checked=0
if echo "${url}" | grep -Eq "^file:///"; then
[ -e "${url#file://}" ] || return 1
- elif [ -n "${CURL}" ]; then
- "${CURL}" --output /dev/null --silent --head --fail "${url}" || return 1
- elif command -v wget > /dev/null 2>&1; then
- wget -S --spider "${url}" 2>&1 | grep -q 'HTTP/1.1 200 OK' || return 1
+ return 0
+ elif [ -n "${NETDATA_ASSUME_REMOTE_FILES_ARE_PRESENT}" ]; then
+ return 0
+ fi
+
+ if [ -n "${CURL}" ]; then
+ checked=1
+
+ if "${CURL}" --output /dev/null --silent --head --fail "${url}"; then
+ succeeded=1
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if wget -S --spider "${url}" 2>&1 | grep -q 'HTTP/1.1 200 OK'; then
+ succeeded=1
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
fatal "${ERROR_F0003}" F0003
fi
@@ -603,13 +656,39 @@ check_for_remote_file() {
download() {
url="${1}"
dest="${2}"
+ succeeded=0
+ checked=0
if echo "${url}" | grep -Eq "^file:///"; then
run cp "${url#file://}" "${dest}" || return 1
- elif [ -n "${CURL}" ]; then
- run "${CURL}" --fail -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}" || return 1
- elif command -v wget > /dev/null 2>&1; then
- run wget -T 15 -O "${dest}" "${url}" || return 1
+ return 0
+ fi
+
+
+ if [ -n "${CURL}" ]; then
+ checked=1
+
+ if run "${CURL}" --fail -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"; then
+ succeeded=1
+ else
+ rm -f "${dest}"
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if run wget -T 15 -O "${dest}" "${url}"; then
+ succeeded=1
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
fatal "${ERROR_F0003}" F0003
fi
@@ -633,19 +712,37 @@ get_actual_version() {
get_redirect() {
url="${1}"
+ succeeded=0
+ checked=0
if [ -n "${CURL}" ]; then
- run sh -c "${CURL} ${url} -s -L -I -o /dev/null -w '%{url_effective}' | grep -o '[^/]*$'" || return 1
- elif command -v wget > /dev/null 2>&1; then
- run sh -c "wget -S -O /dev/null ${url} 2>&1 | grep -m 1 Location | grep -o '[^/]*$'" || return 1
+ checked=1
+
+ if run sh -c "${CURL} ${url} -s -L -I -o /dev/null -w '%{url_effective}' | grep -Eo '[^/]+$'"; then
+ succeeded=1
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if run sh -c "wget -S -O /dev/null ${url} 2>&1 | grep -m 1 Location | grep -Eo '[^/]+$'"; then
+ succeeded=1
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
fatal "${ERROR_F0003}" F0003
fi
}
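
Reviewer note: within the script, `get_redirect` turns a redirecting URL into its final path component; for example (hedged, the exact tag depends on the latest release):

```sh
get_redirect "https://github.com/netdata/netdata/releases/latest"
# curl follows the redirect to .../releases/tag/vX.Y.Z and the trailing
# 'vX.Y.Z' component is printed, e.g. "v1.45.0"
```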
safe_sha256sum() {
- # Within the context of the installer, we only use -c option that is common between the two commands
- # We will have to reconsider if we start using non-common options
if command -v shasum > /dev/null 2>&1; then
shasum -a 256 "$@"
elif command -v sha256sum > /dev/null 2>&1; then
@@ -655,6 +752,17 @@ safe_sha256sum() {
fi
}
+report_bad_sha256sum() {
+ file="${1}"
+ sums="${2}"
+
+ actual="$(safe_sha256sum "${file}" | awk '{ print $1 }')"
+ expected="$(grep "${file}" "${sums}" | awk '{ print $1 }')"
+
+ printf "Expected: %s\n" "${expected}"
+ printf "Actual: %s\n" "${actual}"
+}
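
Reviewer note: hypothetical output of the new helper when a download does not match the published checksums (hashes are made up):

```sh
report_bad_sha256sum netdata-x86_64-latest.gz.run sha256sums.txt
# Expected: 0b1c...d9e  (hash recorded in sha256sums.txt)
# Actual:   7f2a...41c  (hash of the file actually on disk)
```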
+
get_system_info() {
SYSARCH="$(uname -m)"
@@ -796,6 +904,10 @@ update() {
opts="--interactive"
fi
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ export NETDATA_OFFLINE_INSTALL_SOURCE="${NETDATA_OFFLINE_INSTALL_SOURCE}"
+ fi
+
if run_script "${updater}" ${opts} --not-running-from-cron; then
progress "Updated existing install at ${ndprefix}"
return 0
@@ -864,31 +976,47 @@ detect_existing_install() {
set_tmpdir
progress "Checking for existing installations of Netdata..."
+ EXISTING_INSTALL_IS_NATIVE="0"
- if pkg_installed netdata; then
- ndprefix="/"
- EXISTING_INSTALL_IS_NATIVE="1"
+ if [ -n "${INSTALL_PREFIX}" ]; then
+ searchpath="/opt/netdata/bin:${INSTALL_PREFIX}/bin:${INSTALL_PREFIX}/sbin:${INSTALL_PREFIX}/usr/bin:${INSTALL_PREFIX}/usr/sbin:${PATH}"
+ searchpath="${INSTALL_PREFIX}/netdata/bin:${INSTALL_PREFIX}/netdata/sbin:${INSTALL_PREFIX}/netdata/usr/bin:${INSTALL_PREFIX}/netdata/usr/sbin:${searchpath}"
else
- EXISTING_INSTALL_IS_NATIVE="0"
- if [ -n "${INSTALL_PREFIX}" ]; then
- searchpath="${INSTALL_PREFIX}/bin:${INSTALL_PREFIX}/sbin:${INSTALL_PREFIX}/usr/bin:${INSTALL_PREFIX}/usr/sbin:${PATH}"
- searchpath="${INSTALL_PREFIX}/netdata/bin:${INSTALL_PREFIX}/netdata/sbin:${INSTALL_PREFIX}/netdata/usr/bin:${INSTALL_PREFIX}/netdata/usr/sbin:${searchpath}"
- else
- searchpath="${PATH}"
- fi
+ searchpath="/opt/netdata/bin:${PATH}"
+ fi
- ndpath="$(PATH="${searchpath}" command -v netdata 2>/dev/null)"
+ while [ -n "${searchpath}" ]; do
+ _ndpath="$(PATH="${searchpath}" command -v netdata 2>/dev/null)"
- if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ]; then
- ndpath="/opt/netdata/bin/netdata"
+ if [ -n "${_ndpath}" ]; then
+ _ndpath="$(canonical_path "${_ndpath}")"
fi
- if [ -n "${ndpath}" ]; then
- case "${ndpath}" in
- */usr/bin/netdata|*/usr/sbin/netdata) ndprefix="$(dirname "$(dirname "$(dirname "${ndpath}")")")" ;;
- *) ndprefix="$(dirname "$(dirname "${ndpath}")")" ;;
- esac
+ if [ -z "${ndpath}" ] && [ -n "${_ndpath}" ]; then
+ ndpath="${_ndpath}"
+ elif [ -n "${_ndpath}" ] && [ "${ndpath}" != "${_ndpath}" ]; then
+ fatal "Multiple installs of Netdata agent detected (located at '${ndpath}' and '${_ndpath}'). Such a setup is not generally supported. If you are certain you want to operate on one of them despite this, use the '--install-prefix' option to specify the install you want to operate on." F0517
+ fi
+
+ if [ -n "${INSTALL_PREFIX}" ] && [ -n "${ndpath}" ]; then
+ break
+ elif [ -z "${_ndpath}" ]; then
+ break
+ elif echo "${searchpath}" | grep -qv ':'; then
+ searchpath=""
+ else
+ searchpath="$(echo "${searchpath}" | cut -f 2- -d ':')"
fi
+ done
+
+ if pkg_installed netdata; then
+ ndprefix="/"
+ EXISTING_INSTALL_IS_NATIVE="1"
+ elif [ -n "${ndpath}" ]; then
+ case "${ndpath}" in
+ */usr/bin/netdata|*/usr/sbin/netdata) ndprefix="$(dirname "$(dirname "$(dirname "${ndpath}")")")" ;;
+ *) ndprefix="$(dirname "$(dirname "${ndpath}")")" ;;
+ esac
if echo "${ndprefix}" | grep -Eq '^/usr$'; then
ndprefix="$(dirname "${ndprefix}")"
@@ -1254,7 +1382,7 @@ set_auto_updates() {
if [ "${DRY_RUN}" -eq 1 ]; then
progress "Would have attempted to enable automatic updates."
# This first case is for catching using a new kickstart script with an old build. It can be safely removed after v1.34.0 is released.
- elif ! run_as_root grep -q '\-\-enable-auto-updates' "${updater}"; then
+ elif ! run_as_root grep -q '\--enable-auto-updates' "${updater}"; then
echo
elif ! run_as_root "${updater}" --enable-auto-updates "${NETDATA_AUTO_UPDATE_TYPE}"; then
warning "Failed to enable auto updates. Netdata will still work, but you will need to update manually."
@@ -1325,7 +1453,7 @@ netdata_avail_check() {
;;
centos|fedora|ol|amzn)
# shellcheck disable=SC2086
- ${pm_cmd} search --nogpgcheck -v netdata | grep -qE 'Repo *: netdata(-edge)?$'
+ LC_ALL=C ${pm_cmd} search --nogpgcheck -v netdata | grep -qE 'Repo *: netdata(-edge)?$'
return $?
;;
opensuse)
@@ -1341,7 +1469,7 @@ check_special_native_deps() {
if [ "${DISTRO_COMPAT_NAME}" = "centos" ] && [ "${SYSVERSION}" -gt 6 ]; then
progress "EPEL is required on this system, checking if it’s available."
- if ${pm_cmd} search --nogpgcheck -v epel-release | grep -q "No matches found"; then
+ if LC_ALL=C ${pm_cmd} search --nogpgcheck -v epel-release | grep -q "No matches found"; then
warning "Unable to find a suitable source for libuv, cannot install using native packages on this system."
return 1
else
@@ -1356,6 +1484,14 @@ check_special_native_deps() {
fi
}
+cleanup_apt_cache() {
+ cache_dir="/var/cache/apt/archives"
+
+ if [ -d "${cache_dir}" ]; then
+ run_as_root find "${cache_dir}" -type f -name 'netdata*.deb' -delete
+ fi
+}
+
common_rpm_opts() {
pkg_type="rpm"
pkg_suffix=".noarch"
@@ -1422,6 +1558,7 @@ try_package_install() {
install_subcmd="install"
fi
needs_early_refresh=1
+ needs_apt_cache_cleanup=1
pm_cmd="apt-get"
repo_subcmd="update"
pkg_type="deb"
@@ -1496,15 +1633,21 @@ try_package_install() {
deb)
repoconfig_file="${repoconfig_name}${pkg_vsep}${REPOCONFIG_DEB_VERSION}${pkg_suffix}.${pkg_type}"
repoconfig_url="${REPOCONFIG_DEB_URL_PREFIX}/${repo_prefix}/${repoconfig_file}"
+ ref_check_url="${REPOCONFIG_DEB_URL_PREFIX}"
;;
rpm)
repoconfig_file="${repoconfig_name}${pkg_vsep}${REPOCONFIG_RPM_VERSION}${pkg_suffix}.${pkg_type}"
repoconfig_url="${REPOCONFIG_RPM_URL_PREFIX}/${repo_prefix}/${SYSARCH}/${repoconfig_file}"
+ ref_check_url="${REPOCONFIG_RPM_URL_PREFIX}"
;;
esac
if ! pkg_installed "${repoconfig_name}"; then
progress "Checking for availability of repository configuration package."
+ if ! check_for_remote_file "${ref_check_url}"; then
+ NETDATA_ASSUME_REMOTE_FILES_ARE_PRESENT=1
+ fi
+
if ! check_for_remote_file "${repoconfig_url}"; then
warning "No repository configuration package available for ${DISTRO} ${SYSVERSION}. Cannot install native packages on this system."
return 2
@@ -1520,6 +1663,10 @@ try_package_install() {
warning "${failed_refresh_msg}"
return 2
fi
+
+ if [ -n "${needs_apt_cache_cleanup}" ]; then
+ cleanup_apt_cache
+ fi
fi
# shellcheck disable=SC2086
@@ -1643,6 +1790,10 @@ try_static_install() {
progress "Attempting to install using static build..."
fi
+ if ! check_for_remote_file "${NETDATA_TARBALL_BASEURL}"; then
+ NETDATA_ASSUME_REMOTE_FILES_ARE_PRESENT=1
+ fi
+
# Check status code first, so that we can provide nicer fallback for dry runs.
if check_for_remote_file "${NETDATA_STATIC_ARCHIVE_URL}"; then
netdata_agent="${NETDATA_STATIC_ARCHIVE_NAME}"
@@ -1650,15 +1801,15 @@ try_static_install() {
netdata_agent="${NETDATA_STATIC_ARCHIVE_OLD_NAME}"
export NETDATA_STATIC_ARCHIVE_URL="${NETDATA_STATIC_ARCHIVE_OLD_URL}"
else
- warning "There is no static build available for ${SYSARCH} CPUs. This usually means we simply do not currently provide static builds for ${SYSARCH} CPUs."
+ warning "Could not find a ${SELECTED_RELEASE_CHANNEL} static build for ${SYSARCH} CPUs. This usually means there is some networking issue preventing access to https://github.com/ from this system."
return 2
fi
- if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "${tmpdir}/${netdata_agent}"; then
+ if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "./${netdata_agent}"; then
fatal "Unable to download static build archive for ${SYSARCH}. ${BADNET_MSG}." F0208
fi
- if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt"; then
+ if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "./sha256sum.txt"; then
fatal "Unable to fetch checksums to verify static build archive. ${BADNET_MSG}." F0206
fi
@@ -1666,8 +1817,9 @@ try_static_install() {
progress "Would validate SHA256 checksum of downloaded static build archive."
else
if [ -z "${INSTALL_VERSION}" ]; then
- if ! grep "${netdata_agent}" "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
- fatal "Static binary checksum validation failed. ${BADCACHE_MSG}." F0207
+ if ! grep "${netdata_agent}" ./sha256sum.txt | safe_sha256sum -c - > /dev/null 2>&1; then
+ bad_sums_report="$(report_bad_sha256sum "${netdata_agent}" "./sha256sum.txt")"
+ fatal "Static binary checksum validation failed.\n${bad_sums_report}\n${BADCACHE_MSG}." F0207
fi
fi
fi
@@ -1676,9 +1828,15 @@ try_static_install() {
opts="${opts} --accept"
fi
+ env_cmd="env NETDATA_CERT_TEST_URL=${NETDATA_CLAIM_URL} NETDATA_CERT_MODE=check"
+
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ env_cmd="env NETDATA_CERT_TEST_URL=${NETDATA_CLAIM_URL} NETDATA_CERT_MODE=auto"
+ fi
+
progress "Installing netdata"
# shellcheck disable=SC2086
- if ! run_as_root sh "${tmpdir}/${netdata_agent}" ${opts} -- ${NETDATA_INSTALLER_OPTIONS}; then
+ if ! run_as_root ${env_cmd} /bin/sh "./${netdata_agent}" ${opts} -- ${NETDATA_INSTALLER_OPTIONS}; then
warning "Failed to install static build of Netdata on ${SYSARCH}."
run rm -rf /opt/netdata
return 2
@@ -1750,8 +1908,14 @@ install_local_build_dependencies() {
fi
# shellcheck disable=SC2086
- if ! run_as_root "${bash}" "${tmpdir}/install-required-packages.sh" ${opts} netdata; then
- warning "Failed to install all required packages, but installation might still be possible."
+ if [ "$(uname -s)" = "Darwin" ]; then
+ if ! run "${bash}" "${tmpdir}/install-required-packages.sh" ${opts} netdata; then
+ warning "Failed to install all required packages, but installation might still be possible."
+ fi
+ else
+ if ! run_as_root "${bash}" "${tmpdir}/install-required-packages.sh" ${opts} netdata; then
+ warning "Failed to install all required packages, but installation might still be possible."
+ fi
fi
}
@@ -1784,6 +1948,7 @@ build_and_install() {
run_script ./netdata-installer.sh ${opts}
case $? in
+ 0) ;;
1)
if [ -n "${EXIT_REASON}" ]; then
fatal "netdata-installer.sh failed to run: ${EXIT_REASON}" "${EXIT_CODE}"
@@ -1792,6 +1957,7 @@ build_and_install() {
fi
;;
2) fatal "Insufficient RAM to install netdata." F0008 ;;
+ *) fatal "netdata-installer.sh failed to run: Encountered an unhandled error in the installer code." F051A ;;
esac
}
@@ -1811,14 +1977,14 @@ try_build_install() {
set_source_archive_urls "${SELECTED_RELEASE_CHANNEL}"
if [ -n "${INSTALL_VERSION}" ]; then
- if ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz"; then
+ if ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "./netdata-v${INSTALL_VERSION}.tar.gz"; then
fatal "Failed to download source tarball for local build. ${BADNET_MSG}." F000B
fi
- elif ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "${tmpdir}/netdata-latest.tar.gz"; then
+ elif ! download "${NETDATA_SOURCE_ARCHIVE_URL}" "./netdata-latest.tar.gz"; then
fatal "Failed to download source tarball for local build. ${BADNET_MSG}." F000B
fi
- if ! download "${NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt"; then
+ if ! download "${NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL}" "./sha256sum.txt"; then
fatal "Failed to download checksums for source tarball verification. ${BADNET_MSG}." F000C
fi
@@ -1827,18 +1993,19 @@ try_build_install() {
else
if [ -z "${INSTALL_VERSION}" ]; then
# shellcheck disable=SC2086
- if ! grep netdata-latest.tar.gz "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
- fatal "Tarball checksum validation failed. ${BADCACHE_MSG}." F0005
+ if ! grep netdata-latest.tar.gz "./sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ bad_sums_report="$(report_bad_sha256sum netdata-latest.tar.gz "./sha256sum.txt")"
+ fatal "Tarball checksum validation failed.\n${bad_sums_report}\n${BADCACHE_MSG}." F0005
fi
fi
fi
if [ -n "${INSTALL_VERSION}" ]; then
- run tar -xf "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz" -C "${tmpdir}"
- rm -rf "${tmpdir}/netdata-v${INSTALL_VERSION}.tar.gz" > /dev/null 2>&1
+ run tar -xf "./netdata-v${INSTALL_VERSION}.tar.gz" -C "${tmpdir}"
+ rm -rf "./netdata-v${INSTALL_VERSION}.tar.gz" > /dev/null 2>&1
else
- run tar -xf "${tmpdir}/netdata-latest.tar.gz" -C "${tmpdir}"
- rm -rf "${tmpdir}/netdata-latest.tar.gz" > /dev/null 2>&1
+ run tar -xf "./netdata-latest.tar.gz" -C "${tmpdir}"
+ rm -rf "./netdata-latest.tar.gz" > /dev/null 2>&1
fi
if [ "${DRY_RUN}" -ne 1 ]; then
@@ -1875,13 +2042,21 @@ prepare_offline_install_source() {
static|'')
set_static_archive_urls "${SELECTED_RELEASE_CHANNEL}" "x86_64"
+ if ! check_for_remote_file "${NETDATA_TARBALL_BASEURL}"; then
+ NETDATA_ASSUME_REMOTE_FILES_ARE_PRESENT=1
+ fi
+
if check_for_remote_file "${NETDATA_STATIC_ARCHIVE_URL}"; then
- for arch in ${STATIC_INSTALL_ARCHES}; do
+ for arch in $(echo "${NETDATA_OFFLINE_ARCHES:-${STATIC_INSTALL_ARCHES}}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}{printf("\n")}'); do
set_static_archive_urls "${SELECTED_RELEASE_CHANNEL}" "${arch}"
- progress "Fetching ${NETDATA_STATIC_ARCHIVE_URL}"
- if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "netdata-${arch}-latest.gz.run"; then
- warning "Failed to download static installer archive for ${arch}. ${BADNET_MSG}."
+ if check_for_remote_file "${NETDATA_STATIC_ARCHIVE_URL}"; then
+ progress "Fetching ${NETDATA_STATIC_ARCHIVE_URL}"
+ if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "netdata-${arch}-latest.gz.run"; then
+ warning "Failed to download static installer archive for ${arch}. ${BADNET_MSG}."
+ fi
+ else
+ progress "Skipping ${NETDATA_STATIC_ARCHIVE_URL} as it does not exist on the server."
fi
done
legacy=0
@@ -1895,6 +2070,10 @@ prepare_offline_install_source() {
fi
fi
+ if ! find . -name '*.gz.run'; then
+ fatal "Did not actually download any static installer archives, cannot continue. ${BADNET_MSG}." F0516
+ fi
+
progress "Fetching ${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}"
if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "sha256sums.txt"; then
fatal "Failed to download checksum file. ${BADNET_MSG}." F0506
@@ -1909,8 +2088,16 @@ prepare_offline_install_source() {
if [ "${DRY_RUN}" -ne 1 ]; then
progress "Verifying checksums."
- if ! grep -e "$(find . -name '*.gz.run')" sha256sums.txt | safe_sha256sum -c -; then
- fatal "Checksums for offline install files are incorrect. ${BADCACHE_MSG}." F0507
+
+ failed_files=""
+ for file in $(find . -name '*.gz.run'); do
+ if ! grep -e "${file}" sha256sums.txt | safe_sha256sum -c -; then
+ failed_files="${failed_files}\n${file}\n$(report_bad_sha256sum "${file}" sha256sums.txt)"
+ fi
+ done
+
+ if [ -n "${failed_files}" ]; then
+ fatal "Checksums for offline install files are incorrect.\n${failed_files}\n${BADCACHE_MSG}." F0507
fi
else
progress "Would verify SHA256 checksums of downloaded installation files."
@@ -2087,6 +2274,10 @@ validate_args() {
case "${NETDATA_FORCE_METHOD}" in
native|build) fatal "Offline installs are only supported for static builds currently." F0502 ;;
esac
+
+ if [ ! -d "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ fatal "Offline install source must be a directory." F0519
+ fi
fi
if [ -n "${LOCAL_BUILD_OPTIONS}" ]; then
@@ -2296,9 +2487,19 @@ parse_args() {
fatal "A target directory must be specified with the --prepare-offline-install-source option." F0500
fi
;;
+ "--offline-architecture")
+ if echo "${STATIC_INSTALL_ARCHES}" | grep -qw "${2}"; then
+ NETDATA_OFFLINE_ARCHES="${NETDATA_OFFLINE_ARCHES} ${2}"
+ else
+ fatal "${2} is not a recognized static build architecture (supported architectures are ${STATIC_INSTALL_ARCHES})" F0518
+ fi
+ shift 1
+ ;;
"--offline-install-source")
if [ -d "${2}" ]; then
NETDATA_OFFLINE_INSTALL_SOURCE="${2}"
+ # shellcheck disable=SC2164
+ NETDATA_TARBALL_BASEURL="file://$(cd "${2}"; pwd)"
shift 1
else
fatal "A source directory must be specified with the --offline-install-source option." F0501
@@ -2325,8 +2526,4 @@ confirm_root_support
get_system_info
confirm_install_prefix
-if [ -z "${ACTION}" ]; then
- handle_existing_install
-fi
-
main
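
Reviewer note: putting the new offline-install pieces together (`--offline-architecture`, the `file://` `NETDATA_TARBALL_BASEURL`, and the existing `--prepare-offline-install-source`/`--offline-install-source` options), a hedged end-to-end sketch looks like this; directory names and the architecture are illustrative:

```sh
# On a machine with internet access: prepare an offline source for one
# architecture (repeat --offline-architecture to include several).
sh kickstart.sh --prepare-offline-install-source ./netdata-offline \
  --offline-architecture x86_64

# Copy ./netdata-offline to the target machine, then install from it there:
sh kickstart.sh --offline-install-source ./netdata-offline
```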
diff --git a/packaging/installer/methods/ansible.md b/packaging/installer/methods/ansible.md
index 6ce4e8f0f..0aadeff91 100644
--- a/packaging/installer/methods/ansible.md
+++ b/packaging/installer/methods/ansible.md
@@ -10,7 +10,7 @@ learn_rel_path: "Installation/Install on specific environments"
# Deploy Netdata with Ansible
-Netdata's [one-line kickstart](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#install-on-linux-with-one-line-installer) is zero-configuration, highly adaptable, and compatible with tons
+Netdata's [one-line kickstart](/packaging/installer/README.md#install-on-linux-with-one-line-installer) is zero-configuration, highly adaptable, and compatible with tons
of different operating systems and Linux distributions. You can use it on bare metal, VMs, containers, and everything
in-between.
@@ -103,9 +103,9 @@ two different SSH keys supplied by AWS.
### Edit the `vars/main.yml` file
-In order to connect your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
-charts](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) or perform [Metric
-Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md), you need to set the `claim_token`
+In order to connect your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in composite
+charts or perform [Metric
+Correlations](/docs/metric-correlations.md), you need to set the `claim_token`
and `claim_room` variables.
To find your `claim_token` and `claim_room`, go to Netdata Cloud, then click on your Space's name in the top navigation,
@@ -130,7 +130,7 @@ hostname of the node, the playbook disables that local dashboard by setting `web
security boost by not allowing any unwanted access to the local dashboard.
You can read more about this decision, or other ways you might lock down the local dashboard, in our [node security
-doc](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md).
+doc](/docs/security-and-privacy-design/README.md).
> Curious about why Netdata's dashboard is open by default? Read our [blog
> post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) on that zero-configuration design decision.
diff --git a/packaging/installer/methods/aws.md b/packaging/installer/methods/aws.md
index c0b92a036..8648a8f0b 100644
--- a/packaging/installer/methods/aws.md
+++ b/packaging/installer/methods/aws.md
@@ -11,12 +11,12 @@ learn_rel_path: "Installation/Install on specific environments"
Netdata is fully compatible with Amazon Web Services (AWS).
You can install Netdata on cloud instances to monitor the apps/services running there, or use
-multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+multiple instances in a [parent-child streaming](/src/streaming/README.md) configuration.
## Recommended installation method
The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
-we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+we recommend the [`kickstart.sh` automatic installation script](/packaging/installer/methods/kickstart.md).
If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
followed by the solution for your provider.
@@ -41,11 +41,11 @@ command from a remote system, and it fails, it's likely that a firewall is block
Another option is to put Netdata behind web server, which will proxy requests through standard HTTP/HTTPS ports
(80/443), which are likely already open on your instance. We have a number of guides available:
-- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
-- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
-- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
-- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
-- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md)
+- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md)
+- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md)
+- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md)
+- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md)
Sign in to the [AWS console](https://console.aws.amazon.com/) and navigate to the EC2 dashboard. Click on the **Security
Groups** link in the navigation, beneath the **Network & Security** heading. Find the Security Group your instance
diff --git a/packaging/installer/methods/azure.md b/packaging/installer/methods/azure.md
index 4c39a00ad..94590eecb 100644
--- a/packaging/installer/methods/azure.md
+++ b/packaging/installer/methods/azure.md
@@ -11,12 +11,12 @@ learn_rel_path: "Installation/Install on specific environments"
Netdata is fully compatible with Azure.
You can install Netdata on cloud instances to monitor the apps/services running there, or use
-multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+multiple instances in a [parent-child streaming](/src/streaming/README.md) configuration.
## Recommended installation method
The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
-we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+we recommend the [`kickstart.sh` automatic installation script](/packaging/installer/methods/kickstart.md).
If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
followed by the solution for your provider.
@@ -41,11 +41,11 @@ command from a remote system, and it fails, it's likely that a firewall is block
Another option is to put Netdata behind web server, which will proxy requests through standard HTTP/HTTPS ports
(80/443), which are likely already open on your instance. We have a number of guides available:
-- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
-- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
-- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
-- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
-- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md)
+- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md)
+- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md)
+- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md)
+- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md)
Sign in to the [Azure portal](https://portal.azure.com) and open the virtual machine running Netdata. Click on the
**Networking** link beneath the **Settings** header, then click on the **Add inbound security rule** button.
diff --git a/packaging/installer/methods/freebsd.md b/packaging/installer/methods/freebsd.md
index 21670cdc9..3a33d2e90 100644
--- a/packaging/installer/methods/freebsd.md
+++ b/packaging/installer/methods/freebsd.md
@@ -28,19 +28,17 @@ Please respond in the affirmative for any relevant prompts during the installati
## Install Netdata
-The simplest method is to use the single line [kickstart script](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/kickstart)
+The simplest method is to use the single line [kickstart script](/packaging/installer/methods/kickstart.md)
If you have a Netdata cloud account then clicking on the **Connect Nodes** button will generate the kickstart command you should use. Use the command from the "Linux" tab, it should look something like this:
```sh
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --claim-token <CLAIM_TOKEN> --claim-url https://app.netdata.cloud
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --claim-token <CLAIM_TOKEN> --claim-url https://app.netdata.cloud
```
Please respond in the affirmative for any relevant prompts during the installation process.
Once the installation is completed, you should be able to start monitoring the FreeBSD server using Netdata.
-![image](https://user-images.githubusercontent.com/24860547/202489210-3c5a3346-8f53-4b7b-9832-f9383b34d864.png)
-
Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).
## Manual installation
@@ -86,11 +84,9 @@ cd /opt/netdata/usr/libexec/netdata/ && ./netdata-updater.sh
You can now access the Netdata dashboard by navigating to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your system.
-![image](https://user-images.githubusercontent.com/2662304/48304090-fd384080-e51b-11e8-80ae-eecb03118dda.png)
-
Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self hosted PostHog instance within the Netdata infrastructure. To read
more about the information collected and how to opt-out, check the [anonymous statistics
-page](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md).
+page](/docs/netdata-agent/configuration/anonymous-telemetry-events.md).
## Updating the Agent on FreeBSD
If you have not passed the `--auto-update` or `-u` parameter for the installer to enable automatic updating, repeat the last step to update Netdata whenever a new version becomes available.
@@ -134,7 +130,7 @@ The following options are mutually exclusive and specifiy special operations oth
- `--uninstall`: Uninstall an existing installation of Netdata. Fails if there is no existing install.
- `--claim-only`: If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally.
- `--repositories-only`: Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets --native-only.
-- `--prepare-offline-install-source`: Instead of insallling the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md) for more info.
+- `--prepare-offline-install-source`: Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info.
Additionally, the following environment variables may be used to further customize how the script runs (most users
should not need to use special values for any of these):
diff --git a/packaging/installer/methods/gcp.md b/packaging/installer/methods/gcp.md
index 0b16b1096..5003decb4 100644
--- a/packaging/installer/methods/gcp.md
+++ b/packaging/installer/methods/gcp.md
@@ -12,12 +12,12 @@ learn_rel_path: "Installation/Install on specific environments"
Netdata is fully compatible with the Google Cloud Platform (GCP).
You can install Netdata on cloud instances to monitor the apps/services running there, or use
-multiple instances in a [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) configuration.
+multiple instances in a [parent-child streaming](/src/streaming/README.md) configuration.
## Recommended installation method
The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
-we recommend the [`kickstart.sh` automatic installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+we recommend the [`kickstart.sh` automatic installation script](/packaging/installer/methods/kickstart.md).
If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
followed by the solution for your provider.
@@ -42,11 +42,11 @@ command from a remote system, and it fails, it's likely that a firewall is block
Another option is to put Netdata behind a web server, which will proxy requests through standard HTTP/HTTPS ports
(80/443), which are likely already open on your instance. We have a number of guides available:
-- [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
-- [Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
-- [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
-- [HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md)
-- [lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md)
+- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md)
+- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md)
+- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md)
+- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md)
To add a firewall rule, go to the [Firewall rules page](https://console.cloud.google.com/networking/firewalls/list) and
diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md
index b21f4dde9..a525cc70d 100644
--- a/packaging/installer/methods/kickstart.md
+++ b/packaging/installer/methods/kickstart.md
@@ -1,13 +1,3 @@
-<!--
-title: "Install Netdata with kickstart.sh"
-description: "The kickstart.sh script installs Netdata from source, including all dependencies required to connect to Netdata Cloud, with a single command."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/packaging/installer/methods/kickstart.md"
-sidebar_label: "One line installer (kickstart.sh)"
-learn_status: "Published"
-learn_rel_path: "Installation/Installation methods"
-sidebar_position: 10
--->
-
import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
import { Install, InstallBox } from '@site/src/components/Install/'
import Tabs from '@theme/Tabs';
@@ -19,28 +9,13 @@ import TabItem from '@theme/TabItem';
`kickstart.sh` is the recommended way of installing Netdata.
-This script works on all Linux distributions and macOS environments, by detecting the optimal method of installing Netdata directly to the operating system (it will never install a docker image of Netdata - to run Netdata in a container [check Installing with Docker](https://learn.netdata.cloud/docs/installing/docker)).
-
-If you are installing on macOS, make sure to check the [install documentation for macOS](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/macos.md) before continuing.
-
-
-## Verify script integrity
-
-To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above,
-run the following:
-
-```bash
-[ "<checksum-will-be-added-in-documentation-processing>" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
-```
-
-If the script is valid, this command will return `OK, VALID`.
-
+This script works on all Linux distributions and macOS environments, by detecting the optimal method of installing Netdata directly to the operating system.
## Installation
> :bulb: Tip
>
-> If you are unsure whether you want nightly or stable releases, read the [installation guide](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#nightly-vs-stable-releases).
+> If you are unsure whether you want nightly or stable releases, read the [related section](/packaging/installer/README.md#nightly-vs-stable-releases) of our Documentation, detailing the pros and cons of each release type.
To install Netdata, run the following as your normal user:
@@ -58,10 +33,20 @@ To install Netdata, run the following as your normal user:
</Tabs>
> :bookmark_tabs: Note
->
+>
> If you plan to also connect the node to Netdata Cloud, make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space,
-> and `YOUR_ROOM_ID` with the ID of the room you are willing to connect the node to.
+> and `YOUR_ROOM_ID` with the ID of the Room you are willing to connect the node to.
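+
+As a minimal sketch, a download-and-install run that also claims the node looks like this (the token and Room ID are placeholders you must replace, as explained in the note above):
+
+```bash
+curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --claim-token YOUR_CLAIM_TOKEN --claim-rooms YOUR_ROOM_ID
+```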
+## Verify script integrity
+
+To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above,
+run the following:
+
+```bash
+[ "@KICKSTART_CHECKSUM@" = "$(curl -Ss https://get.netdata.cloud/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+
+If the script is valid, this command will return `OK, VALID`.
## What does `kickstart.sh` do?
@@ -78,6 +63,116 @@ The `kickstart.sh` script does the following after being downloaded and run usin
versions, unless you override that with an [optional parameter](#optional-parameters-to-alter-your-installation).
- Prints a message whether installation succeeded or failed for QA purposes.
+## Start, stop, or restart the Netdata Agent
+
+You will most often need to _restart_ the Agent to load new or edited configuration files.
+
+> **Note**
+> Stopping or restarting the Netdata Agent will cause gaps in stored metrics until the `netdata` process initiates collectors and the database engine.
+>
+> You do not need to restart the Netdata Agent between changes to health configuration files, see the relevant section on [reloading health configuration](/src/health/REFERENCE.md#reload-health-configuration).
+
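+For health configuration specifically, a running Agent can reload its alert definitions without a restart; see the reference linked above. A typical invocation is:
+
+```bash
+sudo netdatacli reload-health
+```
+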
+### Using `systemctl` or `service`
+
+This is the recommended way to start, stop, or restart the Netdata daemon.
+
+- To **start** Netdata, run `sudo systemctl start netdata`.
+- To **stop** Netdata, run `sudo systemctl stop netdata`.
+- To **restart** Netdata, run `sudo systemctl restart netdata`.
+
+If the above commands fail, or you know that you're using a non-systemd system, try using the `service` command:
+
+- Starting: `sudo service netdata start`.
+- Stopping: `sudo service netdata stop`.
+- Restarting: `sudo service netdata restart`.
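+
+After a restart, it can help to confirm that the daemon actually came back up. A quick check on systemd systems (adapt as needed for `service`-based systems) is:
+
+```bash
+sudo systemctl status netdata
+```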
+
+### Using the `netdata` command
+
+Use the `netdata` command, typically located at `/usr/sbin/netdata`, to start the Netdata daemon:
+
+```bash
+sudo netdata
+```
+
+If you start the daemon this way, close it with `sudo killall netdata`.
+
+### Shutdown using `netdatacli`
+
+The Netdata Agent also comes with a [CLI tool](/src/cli/README.md) capable of performing shutdowns. Start the Agent back up using your preferred method listed above.
+
+```bash
+sudo netdatacli shutdown-agent
+```
+
+## Starting Netdata at boot
+
+In the `system` directory you can find scripts and configurations for the
+various distros.
+
+### systemd
+
+The installer already installs `netdata.service` if it detects a systemd system.
+
+To install `netdata.service` by hand, run:
+
+```sh
+# stop Netdata
+killall netdata
+
+# copy netdata.service to systemd
+cp system/netdata.service /etc/systemd/system/
+
+# let systemd know there is a new service
+systemctl daemon-reload
+
+# enable Netdata at boot
+systemctl enable netdata
+
+# start Netdata
+systemctl start netdata
+```
+
+### init.d
+
+In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution's documentation. For Ubuntu, this can be done by running the following commands as root.
+
+```sh
+# copy the Netdata startup file to /etc/init.d
+cp system/netdata-lsb /etc/init.d/netdata
+
+# make sure it is executable
+chmod +x /etc/init.d/netdata
+
+# enable it
+update-rc.d netdata defaults
+```
+
+### openrc / Gentoo Linux
+
+In the `system` directory you can find `netdata-openrc`. Copy it to the proper
+place according to your distribution's documentation.
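+
+A minimal sketch of the usual OpenRC setup follows; the target path and runlevel are assumptions, so defer to your distribution's documentation:
+
+```sh
+# copy the Netdata OpenRC service script into place and make it executable
+cp system/netdata-openrc /etc/init.d/netdata
+chmod +x /etc/init.d/netdata
+
+# add it to the default runlevel and start it
+rc-update add netdata default
+rc-service netdata start
+```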
+
+### CentOS / Red Hat Enterprise Linux
+
+For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can be installed by running the following commands as root.
+
+```sh
+# copy the Netdata startup file to /etc/init.d
+cp system/netdata-init-d /etc/init.d/netdata
+
+# make sure it is executable
+chmod +x /etc/init.d/netdata
+
+# enable it
+chkconfig --add netdata
+```
+
+_There has been some recent work on the init script; see the following PR: <https://github.com/netdata/netdata/pull/403>_
+
+### Other operating systems
+
+You can start Netdata by running it from `/etc/rc.local` or your system's equivalent.
+
## Optional parameters to alter your installation
The `kickstart.sh` script accepts a number of optional parameters to control how the installation process works:
@@ -145,9 +240,9 @@ By default, the kickstart script will provide a Netdata agent installation that
- `--claim-url`
Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`. Use this option to change the Netdata Cloud URL to point to your Netdata Cloud installation.
- `--claim-rooms`
- Specify a comma-separated list of tokens for each War Room this node should appear in.
+ Specify a comma-separated list of tokens for each Room this node should appear in.
- `--claim-proxy`
- Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy) for details.
+ Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](/src/claim/README.md#connect-through-a-proxy) for details.
- `--claim-only`
If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally.
- `--require-cloud`
@@ -177,11 +272,12 @@ By default, the agent is sending anonymous telemetry data to help us take identi
Uninstall an existing installation of Netdata. Fails if there is no existing install.
### other options
+
- `--dry-run`
Show what the installer would do, but don’t actually do any of it.
- `--dont-start-it`
Don’t auto-start the daemon after installing. This parameter is not guaranteed to work.
-- `--override-distro`
+- `--distro-override`
Override the distro detection logic and assume the system is using a specific Linux distribution and release. Takes a single argument consisting of the values of the `ID`, `VERSION_ID`, and `VERSION_CODENAME` fields from `/etc/os-release` for the desired distribution.
The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install:
@@ -189,7 +285,7 @@ The following options are mutually exclusive and specify special operations othe
- `--repositories-only`
Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets --native-only.
- `--prepare-offline-install-source`
- Instead of insallling the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md) for more info.
+  Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info.
### environment variables
@@ -204,10 +300,9 @@ should not need to use special values for any of these):
those to work, or have a different tool to do the same thing on your system, you can specify it here.
- `DISABLE_TELEMETRY`: If set to a value other than 0, behave as if `--disable-telemetry` was specified.
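
For example, assuming you have already downloaded the script to `/tmp/netdata-kickstart.sh` as shown earlier, telemetry can be disabled via the environment instead of the flag:

```bash
DISABLE_TELEMETRY=1 sh /tmp/netdata-kickstart.sh
```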
-
## Native packages
-We publish official DEB/RPM packages for a number of common Linux distributions as part of our releases and nightly
+We publish [official DEB/RPM packages](/packaging/installer/methods/packages.md) for a number of common Linux distributions as part of our releases and nightly
builds. These packages are available for 64-bit x86 systems. Depending on the distribution and release they may
also be available for 32-bit x86, ARMv7, and AArch64 systems. If a native package is available, it will be used as the
default installation method. This allows you to handle Netdata updates as part of your usual system update procedure.
@@ -217,7 +312,7 @@ you can do so by adding `--native-only` to the options you pass to the installer
## Static builds
-We publish pre-built static builds of Netdata for Linux systems. Currently, these are published for 64-bit x86, ARMv7,
+We publish pre-built [static builds](/packaging/makeself/README.md) of Netdata for Linux systems. Currently, these are published for 64-bit x86, ARMv7,
AArch64, and POWER8+ hardware. These static builds are able to operate in a mostly self-contained manner and only
require a POSIX compliant shell and a supported init system. These static builds install under `/opt/netdata`. If
you are on a platform which we provide static builds for but do not provide native packages for, a static build
diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md
index 17cb9f5e1..6a0dee98a 100644
--- a/packaging/installer/methods/kubernetes.md
+++ b/packaging/installer/methods/kubernetes.md
@@ -3,14 +3,14 @@ import TabItem from '@theme/TabItem';
# Install Netdata on Kubernetes
-This document details how to install Netdata on an existing Kubernetes (k8s) cluster, and connect it to Netdata Cloud. Read our [Kubernetes visualizations](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) documentation, to see what you will get.
+This document details how to install Netdata on an existing Kubernetes (k8s) cluster, and connect it to Netdata Cloud. Read our [Kubernetes visualizations](/docs/dashboards-and-charts/kubernetes-tab.md) documentation, to see what you will get.
The [Netdata Helm chart](https://github.com/netdata/helmchart/blob/master/charts/netdata/README.md) installs one `parent` pod for storing metrics and managing alert notifications, plus an additional
`child` pod for every node in the cluster, responsible for collecting metrics from the node, Kubernetes control planes,
pods/containers, and [supported application-specific
metrics](https://github.com/netdata/helmchart#service-discovery-and-supported-services).
-### Prerequisites
+## Prerequisites
To deploy Kubernetes monitoring with Netdata, you need:
@@ -97,7 +97,7 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne
> :bookmark_tabs: Note
>
> Make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space,
- > and `YOUR_ROOM_ID` with the ID of the room you are willing to connect to.
+ > and `YOUR_ROOM_ID` with the ID of the Room you are willing to connect to.
These settings connect your `parent`/`child` nodes to Netdata Cloud and store more metrics in the nodes' time-series databases.
@@ -191,10 +191,10 @@ helm upgrade netdata netdata/netdata
## What's next?
-[Start Kubernetes monitoring](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) in Netdata Cloud, which comes with meaningful visualizations out of the box.
+[Start Kubernetes monitoring](/docs/dashboards-and-charts/kubernetes-tab.md) in Netdata Cloud, which comes with meaningful visualizations out of the box.
### Related reference documentation
-- [Netdata Cloud · Kubernetes monitoring](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md)
+- [Netdata Cloud · Kubernetes monitoring](/docs/dashboards-and-charts/kubernetes-tab.md)
- [Netdata Helm chart](https://github.com/netdata/helmchart)
- [Netdata service discovery](https://github.com/netdata/agent-service-discovery/)
diff --git a/packaging/installer/methods/macos.md b/packaging/installer/methods/macos.md
index 480a41283..31aaebf98 100644
--- a/packaging/installer/methods/macos.md
+++ b/packaging/installer/methods/macos.md
@@ -9,8 +9,8 @@ learn_rel_path: "Installation/Install on specific environments"
# Install Netdata on macOS
Netdata works on macOS, albeit with some limitations.
-The number of charts displaying system metrics is limited, but you can use any of Netdata's [external plugins](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md) to monitor any services you might have installed on your macOS system.
-You could also use a macOS system as the parent node in a [streaming configuration](https://github.com/netdata/netdata/blob/master/streaming/README.md).
+The number of charts displaying system metrics is limited, but you can use any of Netdata's [external plugins](/src/collectors/plugins.d/README.md) to monitor any services you might have installed on your macOS system.
+You could also use a macOS system as the parent node in a [streaming configuration](/src/streaming/README.md).
You can install Netdata in one of the three following ways:
@@ -21,39 +21,39 @@ You can install Netdata in one of the three following ways:
Each of these installation options requires [Homebrew](https://brew.sh/) for handling dependencies.
> The Netdata Homebrew package is community-created and -maintained.
-> Community-maintained packages _may_ receive support from Netdata, but are only a best-effort affair. Learn more about [Netdata's platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+> Community-maintained packages _may_ receive support from Netdata, but are only a best-effort affair. Learn more about [Netdata's platform support policy](/docs/netdata-agent/versions-and-platforms.md).
## Install Netdata with our automatic one-line installation script
**Local Netdata Agent installation**
-To install Netdata using our automatic [kickstart](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#automatic-one-line-installation-script) open a new terminal and run:
+To install Netdata using our automatic [kickstart](/packaging/installer/README.md#automatic-one-line-installation-script) open a new terminal and run:
```bash
-curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
+curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
```
The Netdata Agent is installed under `/usr/local/netdata`. Dependencies are handled via Homebrew.
**Automatically connect to Netdata Cloud during installation**
-The `kickstart.sh` script accepts additional parameters to automatically [connect](https://github.com/netdata/netdata/blob/master/claim/README.md) your node to Netdata
+The `kickstart.sh` script accepts additional parameters to automatically [connect](/src/claim/README.md) your node to Netdata
Cloud immediately after installation. Find the `token` and `rooms` strings by [signing in to Netdata
Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Connect Nodes** in the [Spaces management
-area](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/organize-your-infrastrucutre-invite-your-team.md#netdata-cloud-spaces).
+area](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-spaces).
- `--claim-token`: Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node
after the install.
-- `--claim-rooms`: Specify a comma-separated list of tokens for each War Room this node should appear in.
+- `--claim-rooms`: Specify a comma-separated list of tokens for each Room this node should appear in.
- `--claim-proxy`: Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy.
- See [connecting through a proxy](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy) for details.
+ See [connecting through a proxy](/src/claim/README.md#connect-through-a-proxy) for details.
- `--claim-url`: Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`.
For example:
```bash
-curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --install-prefix /usr/local/ --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
+curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --install-prefix /usr/local/ --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
```
The Netdata Agent is installed under `/usr/local/netdata` on your machine. Your machine will also show up as a node in your Netdata Cloud.
-If you experience issues while claiming your node, follow the steps in our [Troubleshooting](https://github.com/netdata/netdata/blob/master/claim/README.md#troubleshooting) documentation.
+If you experience issues while claiming your node, follow the steps in our [Troubleshooting](/src/claim/README.md#troubleshooting) documentation.
## Install Netdata via Homebrew
### For macOS Intel
@@ -79,10 +79,6 @@ Homebrew will place your Netdata configuration directory at `/opt/homebrew/etc/n
Use the `edit-config` script and the files in this directory to configure Netdata. For reference, you can find stock configuration files at `/opt/homebrew/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`.
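
For example, to tweak the main configuration file using `edit-config` from the Homebrew (Apple Silicon) configuration directory mentioned above:

```bash
cd /opt/homebrew/etc/netdata
sudo ./edit-config netdata.conf   # sudo may be unnecessary depending on file ownership
```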
-
-
-Skip on ahead to the [What's next?](#whats-next) section to find links to helpful post-installation guides.
-
## Install Netdata from source
We don't recommend installing Netdata from source on macOS, as it can be difficult to configure and install dependencies manually.
@@ -94,7 +90,7 @@ We don't recommend installing Netdata from source on macOS, as it can be difficu
```
2. Click **Install** on the Software Update popup window that appears.
-3. Use the same terminal session to install some of Netdata's prerequisites using Homebrew. If you don't want to use [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you can omit `cmake`.
+3. Use the same terminal session to install some of Netdata's prerequisites using Homebrew. If you don't want to use [Netdata Cloud](/docs/netdata-cloud/README.md), you can omit `cmake`.
```bash
brew install ossp-uuid autoconf automake pkg-config libuv lz4 json-c openssl libtool cmake
diff --git a/packaging/installer/methods/manual.md b/packaging/installer/methods/manual.md
index 269b67c1a..31bc392e5 100644
--- a/packaging/installer/methods/manual.md
+++ b/packaging/installer/methods/manual.md
@@ -23,7 +23,7 @@ To install the latest git version of Netdata, please follow these 2 steps:
## Prepare your system
Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
-Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo)
+Otherwise, [perform a cleanup](/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo).
Use our automatic requirements installer (_no need to be `root`_), which attempts to find the packages that
should be installed on your system to build and run Netdata. It supports a large variety of major Linux distributions
@@ -40,7 +40,7 @@ and other operating systems and is regularly tested. You can find this tool [her
- Please note that for RHEL/CentOS you need
[EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
In addition, RHEL/CentOS version 6 also need
- [OKay](https://okay.com.mx/blog-news/rpm-repositories-for-centos-6-and-7.html) for package libuv version 1.
+ [OKay](https://okay.com.mx) for package libuv version 1.
- CentOS 8 / RHEL 8 requires a bit of extra work. See the dedicated section below.
- **SUSE** Linux and its derivatives (including **openSUSE**)
@@ -140,7 +140,7 @@ required if manually installing packages.
CentOS 6.x:
- Enable the EPEL repo
-- Enable the additional repo from [okay.network](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+- Enable the additional repo from [okay.network](https://okay.network)
And install the minimum required dependencies.
@@ -154,7 +154,7 @@ CentOS 8.x:
- Enable the PowerTools repo
- Enable the EPEL repo
-- Enable the Extra repo from [OKAY](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+- Enable the Extra repo from [OKAY](https://okay.network)
And install the minimum required dependencies:
@@ -205,22 +205,22 @@ cd netdata
- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
- `--stable-channel`: Automatically update only on the release of new major versions.
- `--nightly-channel`: Automatically update on every new nightly build.
-- `--disable-telemetry`: Opt-out of [anonymous statistics](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) we use to make
+- `--disable-telemetry`: Opt-out of [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) we use to make
Netdata better.
- `--no-updates`: Prevent automatic updates of any kind.
- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this
cannot be used to change installation types.
-- `--local-files`: Used for [offline installations](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md). Pass four file paths: the Netdata
+- `--local-files`: Used for [offline installations](/packaging/installer/methods/offline.md). Pass four file paths: the Netdata
tarball, the checksum file, the go.d plugin tarball, and the go.d plugin config tarball, to force kickstart to run the
process using those files. This option conflicts with the `--stable-channel` option. If you set this _and_
`--stable-channel`, Netdata will use the local files.
### Connect node to Netdata Cloud during installation
-Unlike the [`kickstart.sh`](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md), the `netdata-installer.sh` script does
-not allow you to automatically [connect](https://github.com/netdata/netdata/blob/master/claim/README.md) your node to Netdata Cloud immediately after installation.
+Unlike the [`kickstart.sh`](/packaging/installer/methods/kickstart.md), the `netdata-installer.sh` script does
+not allow you to automatically [connect](/src/claim/README.md) your node to Netdata Cloud immediately after installation.
-See the [connect to cloud](https://github.com/netdata/netdata/blob/master/claim/README.md) doc for details on connecting a node with a manual installation of Netdata.
+See the [connect to cloud](/src/claim/README.md) doc for details on connecting a node with a manual installation of Netdata.
### 'nonrepresentable section on output' errors
diff --git a/packaging/installer/methods/methods.md b/packaging/installer/methods/methods.md
index f9ca2253e..bc6e879a8 100644
--- a/packaging/installer/methods/methods.md
+++ b/packaging/installer/methods/methods.md
@@ -12,15 +12,15 @@ sidebar_position: 30
Netdata can be installed:
-- [As a DEB/RPM package](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md)
-- [As a static binary](https://github.com/netdata/netdata/blob/master/packaging/makeself/README.md)
-- [From a git checkout](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md)
-- [As a docker container](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md)
+- [As a DEB/RPM package](/packaging/installer/methods/packages.md)
+- [As a static binary](/packaging/makeself/README.md)
+- [From a git checkout](/packaging/installer/methods/manual.md)
+- [As a docker container](/packaging/docker/README.md)
-The [one line installer kickstart.sh](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md)
+The [one line installer kickstart.sh](/packaging/installer/methods/kickstart.md)
picks the most appropriate method out of the first three for any system
and is the recommended installation method, if you don't use containers.
`kickstart.sh` can also be used for
-[offline installation](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/offline.md),
+[offline installation](/packaging/installer/methods/offline.md),
suitable for air-gapped systems.
diff --git a/packaging/installer/methods/offline.md b/packaging/installer/methods/offline.md
index f2b6cc415..83155848f 100644
--- a/packaging/installer/methods/offline.md
+++ b/packaging/installer/methods/offline.md
@@ -31,13 +31,13 @@ be as a regular user from any internet connected system that has the following t
To prepare the offline installation source, simply run:
```bash
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
```
or
```bash
-curl https://my-netdata.io/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
+curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --prepare-offline-install-source ./netdata-offline
```
> The exact name used for the directory does not matter, you can specify any other name you want in place of `./netdata-offline`.
@@ -54,6 +54,6 @@ target system. This can be done in any manner you like, as long as filenames are
After copying the files, simply run the `install.sh` script located in the
offline install source directory. It accepts all the [same options as the kickstart
-script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) for further
+script](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) for further
customization of the installation, though it will default to not enabling automatic updates (as they are not
supported on offline installs).
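
For example, using the `./netdata-offline` directory name from the preparation step (the extra option shown here is just illustrative):

```bash
cd ./netdata-offline && sudo ./install.sh --disable-telemetry
```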
diff --git a/packaging/installer/methods/packages.md b/packaging/installer/methods/packages.md
index d49e21394..90556c1ab 100644
--- a/packaging/installer/methods/packages.md
+++ b/packaging/installer/methods/packages.md
@@ -8,20 +8,18 @@ learn_rel_path: "Installation/Installation methods"
sidebar_position: 20
-->
-# Install Netdata using native DEB/RPM packages.
+# Install Netdata using native DEB/RPM packages
For most common Linux distributions that use either DEB or RPM packages, Netdata provides pre-built native packages
for current releases in line with
-our [official platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+our [official platform support policy](/docs/netdata-agent/versions-and-platforms.md).
These packages will be used by default when attempting to install on a supported platform using our
-[kickstart.sh installer script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+[kickstart.sh installer script](/packaging/installer/methods/kickstart.md).
When using the kickstart script, you can force usage of native DEB or RPM packages by passing the option
`--native-only` when invoking the script. This will cause it to only attempt to use native packages for the install,
and fail if it cannot do so.
-
-
> ### Note
>
> In July 2022, we switched hosting of our native packages from Package Cloud to self-hosted repositories.
@@ -31,10 +29,9 @@ and fail if it cannot do so.
> When selecting a repository configuration package, note that the version 2 packages provide configuration for
> our self-hosted repositories, and the version 1 packages provide configuration for Package Cloud.
+## Manual setup of RPM packages
-## Manual setup of RPM packages.
-
-Netdata’s official RPM repositories are hosted at https://repo.netdata.cloud/repos. We provide four groups of
+Netdata’s official RPM repositories are hosted at <https://repo.netdata.cloud/repos>. We provide four groups of
repositories at that top level:
- `stable`: Contains packages for stable releases of the Netdata Agent.
@@ -55,14 +52,14 @@ Under each of those directories is a directory for each supported release of tha
directory for each supported CPU architecture which contains the actual repository.
For example, for stable release packages for RHEL 9 on 64-bit x86, the full URL for the repository would be
-https://repo.netdata.cloud/repos/stable/el/9/x86_64/
+<https://repo.netdata.cloud/repos/stable/el/9/x86_64/>
Our RPM packages and repository metadata are signed using a GPG key with a user name of ‘Netdatabot’. The
current key fingerprint is `6588FDD7B14721FE7C3115E6F9177B5265F56346`. The associated public key can be fetched from
`https://repo.netdata.cloud/netdatabot.gpg.key`.
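
If you want to import that key manually (for example, to verify downloaded packages yourself), one way to do it on RPM-based systems is:

```sh
# fetch the Netdatabot public key and add it to the RPM keyring
curl -fsSL https://repo.netdata.cloud/netdatabot.gpg.key -o /tmp/netdatabot.gpg.key
sudo rpm --import /tmp/netdatabot.gpg.key
```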
If you are explicitly configuring a system to use our repositories, the recommended setup is to download the
-appropriate repository configuration package from https://repo.netdata.cloud/repos/repoconfig and install it
+appropriate repository configuration package from <https://repo.netdata.cloud/repos/repoconfig> and install it
directly on the target system using the system package manager. This will ensure any packages needed to use the
repository are also installed, and will help enable a seamless transition if we ever need to change our infrastructure.
@@ -73,9 +70,9 @@ repository are also installed, and will help enable a seamless transition if we
> repository _should_ be pulled in automatically by our repository config packages, but if it is not you may need
> to manually install `epel-release` to be able to successfully install the Netdata packages.
-## Manual setup of DEB packages.
+## Manual setup of DEB packages
-Netdata’s official DEB repositories are hosted at https://repo.netdata.cloud/repos. We provide four groups of
+Netdata’s official DEB repositories are hosted at <https://repo.netdata.cloud/repos>. We provide four groups of
repositories at that top level:
- `stable`: Contains packages for stable releases of the Netdata Agent.
@@ -105,7 +102,7 @@ current key fingerprint is `6588FDD7B14721FE7C3115E6F9177B5265F56346`. The assoc
`https://repo.netdata.cloud/netdatabot.gpg.key`.
If you are explicitly configuring a system to use our repositories, the recommended setup is to download the
-appropriate repository configuration package from https://repo.netdata.cloud/repos/repoconfig and install it
+appropriate repository configuration package from <https://repo.netdata.cloud/repos/repoconfig> and install it
directly on the target system using the system package manager. This will ensure any packages needed to use the
repository are also installed, and will help enable a seamless transition if we ever need to change our infrastructure.
diff --git a/packaging/installer/methods/source.md b/packaging/installer/methods/source.md
index 8f34218a2..c6ff6e6fe 100644
--- a/packaging/installer/methods/source.md
+++ b/packaging/installer/methods/source.md
@@ -13,7 +13,7 @@ sidebar_position: 100
These instructions are for advanced users and distribution package
maintainers. Unless this describes you, you almost certainly want
to follow [our guide for manually installing Netdata from a git
-checkout](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md) instead.
+checkout](/packaging/installer/methods/manual.md) instead.
## Required dependencies
@@ -23,11 +23,15 @@ to build and run successfully:
- libuuid
- libuv version 1.0 or newer
- zlib
-- GNU autoconf
-- GNU automake
+- CMake 3.13 or newer
- GCC or Xcode (Clang is known to have issues in certain configurations, see [Using Clang](#using-clang))
-- A version of `make` compatible with GNU automake
-- Git (we use git in the build system to generate version info, don't need a full install, just a working `git show` command)
+- Ninja or Make (Ninja is preferred as it results in significantly faster builds)
+- Git (we use git in the build system to generate version info, you don't need a full install, just a working `git show` command)
+
+The following additional dependencies are also needed, but will be prepared automatically by CMake if they are not available on the build system.
+
+- libyaml
+- JSON-C
Additionally, the following build time features require additional dependencies:
@@ -38,66 +42,36 @@ Additionally, the following build time features require additional dependencies:
- OpenSSL 1.0 or newer (LibreSSL _may_ work, but is largely untested).
- Netdata Cloud support:
- A working internet connection
- - A recent version of CMake
- OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer.
- - JSON-C (may be provided by the user as shown below, or by the system)
- - protobuf (Google Protocol Buffers) and protoc compiler
+ - protobuf (Google Protocol Buffers) and protoc compiler. If protobuf is not available on the system,
+ CMake can be instructed to fetch and build a usable version for Netdata.
+- Netdata Go collectors:
+ - Go 1.21 or newer
## Preparing the source tree
-Certain features in Netdata require custom versions of specific libraries,
-which the the build system will link statically into Netdata. These
-libraries and their header files must be copied into specific locations
-in the source tree to be used.
-
-Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
-Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo)
+Netdata uses Git submodules for some of its components, which must be fetched prior to building Netdata. If you
+are using a source tarball published by the Netdata project, then these are included. If you are using a checkout
+of the Git repository, you may need to explicitly fetch and update the submodules using `git submodule update
+--init --recursive`.
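+
+For a fresh checkout, the whole preparation step might look like this (a sketch; adjust the clone URL or ref to match what you are building):
+
+```sh
+git clone https://github.com/netdata/netdata.git
+cd netdata
+git submodule update --init --recursive
+```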
### Netdata cloud
-#### JSON-C
-
-Netdata requires the use of JSON-C for JSON parsing when using Netdata
-Cloud. Netdata is able to use a system-provided copy of JSON-C, but
-some systems may not provide it. If your system does not provide JSON-C,
-you can do the following to prepare a copy for the build system:
-
-1. Verify the tag that Netdata expects to be used by checking the contents
- of `packaging/jsonc.version` in your Netdata sources.
-2. Obtain the sources for that version by either:
- - Navigating to https://github.com/json-c/json-c and downloading
- and unpacking the source code archive for that release.
- - Cloning the repository with `git` and checking out the required tag.
-3. Prepare the JSON-C sources by running `cmake -DBUILD_SHARED_LIBS=OFF .`
- in the JSON-C source directory.
-4. Build JSON-C by running `make` in the JSON-C source directory.
-5. In the Netdata source directory, create a directory called
- `externaldeps/jsonc`.
-6. Copy `libjson-c.a` from the JSON-C source directory to
- `externaldeps/jsonc/libjson-c.a` in the Netdata source tree.
-7. Copy all of the header files (`*.h`) from the JSON-C source directory
- to `externaldeps/jsonc/json-c` in the Netdata source tree.
-
## Building Netdata
Once the source tree has been prepared, Netdata is ready to be configured
-and built. Netdata currently uses GNU autotools as it's primary build
-system. To build Netdata this way:
+and built. Netdata uses CMake for configuration, and strongly prefers
+the use of an external build directory. To configure and build Netdata:
-1. Run `autoreconf -ivf` in the Netdata source tree.
-2. Run `./configure` in the Netdata source tree.
-3. Run `make` in the Netdata source tree.
+1. Run `cmake -S . -B build -G Ninja` in the source tree. `build` can be replaced with whatever path you want for the build directory. If you wish to use Make instead of Ninja for the build, remove the `-G Ninja` from the command.
+2. Run `cmake --build build`, where `build` is the build directory. CMake’s `--parallel` option can be used to control the number of build jobs that are used.
+3. Run `cmake --install build`, where `build` is the build directory.
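+
+Put together, a typical out-of-tree configure/build/install run looks like the following sketch, using the same `build` directory and Ninja generator as above (the install step usually needs elevated privileges):
+
+```sh
+cmake -S . -B build -G Ninja
+cmake --build build --parallel
+sudo cmake --install build
+```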
### Configure options
-Netdata provides a number of build time configure options. This section
-lists some of the ones you are most likely to need:
-
-- `--prefix`: Specify the prefix under which Netdata will be installed.
-- `--with-webdir`: Specify a path relative to the prefix in which to
- install the web UI files.
-- `--disable-cloud`: Disables all Netdata Cloud functionality for
- this build.
+Netdata’s CMake build infrastructure intentionally does very little auto-detection, and requires most components
+to be explicitly enabled or disabled. A full list of available configuration options for a given version of Netdata,
+with help descriptions, can be seen by running `cmake -LH` in the source tree.
### Using Clang
@@ -123,92 +97,6 @@ A full featured install of Netdata requires some additional components
which must be built and installed separately from the main Netdata
agent. All of these should be handled _after_ installing Netdata itself.
-### React dashboard
-
-The above build steps include a deprecated web UI for Netdata that lacks
-support for Netdata Cloud. To get a fully featured dashboard, you must
-install our new React dashboard.
-
-#### Installing the pre-built React dashboard
-
-We provide pre-built archives of the React dashboard for each release
-(these are also used during our normal install process). To use one
-of these:
-
-1. Verify the release version that Netdata expects to be used by checking
- the contents of `packaging/dashboard.version` in your Netdata sources.
-2. Go to https://github.com/netdata/dashboard/releases and download the
- `dashboard.tar.gz` file for the required release.
-3. Unpack the downloaded archive to a temporary directory.
-4. Copy the contents of the `build` directory from the extracted
- archive to `/usr/share/netdata/web` or the equivalent location for
- your build of Netdata. This _will_ overwrite some files in the target
- location.
-
-#### Building the React dashboard locally
-
-Alternatively, you may wish to build the React dashboard locally. Doing
-so requires a recent version of Node.JS with a working install of
-NPM. Once you have the required tools, do the following:
-
-1. Verify the release version that Netdata expects to be used by checking
- the contents of `packaging/dashboard.version` in your Netdata sources.
-2. Obtain the sources for that version by either:
- - Navigating to https://github.com/netdata/dashboard and downloading
- and unpacking the source code archive for that release.
- - Cloning the repository with `git` and checking out the required tag.
-3. Run `npm install` in the dashboard source tree.
-4. Run `npm run build` in the dashboard source tree.
-5. Copy the contents of the `build` directory just like step 4 of
- installing the pre-built React dashboard.
-
-### Go collectors
-
-A number of the collectors for Netdata are written in Go instead of C,
-and are developed in a separate repository from the mian Netdata code.
-An installation without these collectors is still usable, but will be
-unable to collect metrics for a number of network services the system
-may be providing. You can either install a pre-built copy of these
-collectors, or build them locally.
-
-#### Installing the pre-built Go collectors
-
-We provide pre-built binaries of the Go collectors for all the platforms
-we officially support. To use one of these:
-
-1. Verify the release version that Netdata expects to be used by checking
- the contents of `packaging/go.d.version` in your Netdata sources.
-2. Go to https://github.com/netdata/go.d.plugin/releases, select the
- required release, and download the `go.d.plugin-*.tar.gz` file
- for your system type and CPu architecture and the `config.tar.gz`
- configuration file archive.
-3. Extract the `go.d.plugin-*.tar.gz` archive into a temporary
- location, and then copy the single file in the archive to
- `/usr/libexec/netdata/plugins.d` or the equivalent location for your
- build of Netdata and rename it to `go.d.plugin`.
-4. Extract the `config.tar.gz` archive to a temporarylocation and then
- copy the contents of the archive to `/etc/netdata` or the equivalent
- location for your build of Netdata.
-
-#### Building the Go collectors locally
-
-Alternatively, you may wish to build the Go collectors locally
-yourself. Doing so requires a working installation of Golang 1.13 or
-newer. Once you have the required tools, do the following:
-
-1. Verify the release version that Netdata expects to be used by checking
- the contents of `packaging/go.d.version` in your Netdata sources.
-2. Obtain the sources for that version by either:
- - Navigating to https://github.com/netdata/go.d.plugin and downloading
- and unpacking the source code archive for that release.
- - Cloning the repository with `git` and checking out the required tag.
-3. Run `make` in the go.d.plugin source tree.
-4. Copy `bin/godplugin` to `/usr/libexec/netdata/plugins.d` or th
- equivalent location for your build of Netdata and rename it to
- `go.d.plugin`.
-5. Copy the contents of the `config` directory to `/etc/netdata` or the
- equivalent location for your build of Netdata.
-
### eBPF collector
On Linux systems, Netdata has support for using the kernel's eBPF
diff --git a/packaging/installer/methods/synology.md b/packaging/installer/methods/synology.md
index 3910859b4..742b3abb0 100644
--- a/packaging/installer/methods/synology.md
+++ b/packaging/installer/methods/synology.md
@@ -18,7 +18,7 @@ learn_rel_path: "Installation/Install on specific environments"
The good news is that our
-[one-line installation script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md)
+[one-line installation script](/packaging/installer/methods/kickstart.md)
works fine if your NAS is one that uses the amd64 architecture. It
will install the content into `/opt/netdata`, making future removal safe and simple.
@@ -27,23 +27,22 @@ will install the content into `/opt/netdata`, making future removal safe and sim
When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other
installations run it as the `netdata` user, you might wish to do the same. This requires some extra work:
-1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
-2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
+2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign
the user to the `netdata` group. Netdata will chuid to this user when running.
-3. Change ownership of the following directories, as defined in
- [Netdata Security](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#security-design):
+3. Change ownership of the following directories:
-```sh
-chown -R root:netdata /opt/netdata/usr/share/netdata
-chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
-chown -R netdata:root /opt/netdata/var/log/netdata
-```
+ ```sh
+ chown -R root:netdata /opt/netdata/usr/share/netdata
+ chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
+ chown -R netdata:root /opt/netdata/var/log/netdata
+ ```
4. Restart Netdata
-```sh
-/etc/rc.netdata restart
-```
+ ```sh
+ /etc/rc.netdata restart
+ ```
## Create startup script
@@ -59,6 +58,6 @@ installed. You'll have to do this manually:
[ -x /etc/rc.netdata ] && /etc/rc.netdata start
```
-3. Make sure `/etc/rc.netdata` is executable: `chmod 0755 /etc/rc.netdata`.
+3. Make sure `/etc/rc.local` is executable: `chmod 0755 /etc/rc.local`.
diff --git a/packaging/installer/methods/systems.md b/packaging/installer/methods/systems.md
index e53c4f4a0..8715a57af 100644
--- a/packaging/installer/methods/systems.md
+++ b/packaging/installer/methods/systems.md
@@ -11,8 +11,8 @@ learn_rel_path: "Installation/Install on specific environments"
This category contains specific instructions for some popular environments.
If you have a standard environment that is not yet listed here, just use the
-[one line installer kickstart.sh](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md)
+[one line installer kickstart.sh](/packaging/installer/methods/kickstart.md)
If your environment is somewhat old or unusual, check our
-[platform support policy](https://github.com/netdata/netdata/blob/master/packaging/PLATFORM_SUPPORT.md).
+[platform support policy](/docs/netdata-agent/versions-and-platforms.md).
diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh
index 4326ebe25..c69bbd56e 100755
--- a/packaging/installer/netdata-uninstaller.sh
+++ b/packaging/installer/netdata-uninstaller.sh
@@ -520,6 +520,15 @@ portable_del_user_from_group() {
groupname="${1}"
username="${2}"
+ if command -v getent > /dev/null 2>&1; then
+ getent group "${1:-""}" | grep -q "${2}"
+ else
+ grep "^${1}:" /etc/group | grep -q "${2}"
+ fi
+
+ ret=$?
+ [ "${ret}" != "0" ] && return 0
+
# username is not in group
info "Deleting ${username} user from ${groupname} group ..."
@@ -714,8 +723,13 @@ trap quit_msg EXIT
info "Stopping a possibly running netdata..."
stop_all_netdata
+if [ "$(uname -s)" = "Darwin" ]; then
+ launchctl unload /Library/LaunchDaemons/com.github.netdata.plist 2>/dev/null
+fi
+
#### REMOVE NETDATA FILES
rm_file /etc/logrotate.d/netdata
+rm_file /usr/lib/systemd/journald@netdata.conf.d/netdata.conf
rm_file /etc/systemd/system/netdata.service
rm_file /lib/systemd/system/netdata.service
rm_file /usr/lib/systemd/system/netdata.service
@@ -732,6 +746,7 @@ rm_file /etc/periodic/daily/netdata-updater
rm_file /etc/cron.daily/netdata-updater
rm_file /etc/cron.d/netdata-updater
rm_file /etc/cron.d/netdata-updater-daily
+rm_file /Library/LaunchDaemons/com.github.netdata.plist
if [ -n "${NETDATA_PREFIX}" ] && [ -d "${NETDATA_PREFIX}" ] && [ "netdata" = "$(basename "$NETDATA_PREFIX")" ] ; then
@@ -750,6 +765,7 @@ else
rm_dir "${NETDATA_PREFIX}/var/cache/netdata"
rm_dir "${NETDATA_PREFIX}/var/log/netdata"
rm_dir "${NETDATA_PREFIX}/etc/netdata"
+ rm_dir /usr/lib/systemd/journald@netdata.conf.d/
fi
if [ -n "${tmpdir}" ]; then
@@ -758,8 +774,10 @@ fi
FILE_REMOVAL_STATUS=1
-#### REMOVE NETDATA USER FROM ADDED GROUPS
-if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then
+#### REMOVE USER
+if user_input "Do you want to delete 'netdata' system user ? "; then
+ portable_del_user "netdata" || :
+elif [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then
if user_input "Do you want to delete 'netdata' from following groups: '$NETDATA_ADDED_TO_GROUPS' ? "; then
for group in $NETDATA_ADDED_TO_GROUPS; do
portable_del_user_from_group "${group}" "netdata"
@@ -767,11 +785,6 @@ if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then
fi
fi
-#### REMOVE USER
-if user_input "Do you want to delete 'netdata' system user ? "; then
- portable_del_user "netdata" || :
-fi
-
### REMOVE GROUP
if user_input "Do you want to delete 'netdata' system group ? "; then
portable_del_group "netdata" || :
diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh
index 80faea0a0..fc8b39cdd 100755
--- a/packaging/installer/netdata-updater.sh
+++ b/packaging/installer/netdata-updater.sh
@@ -28,7 +28,7 @@
# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
# Author: Austin S. Hemmelgarn <austin@netdata.cloud>
-# Next unused error code: U001B
+# Next unused error code: U001D
set -e
@@ -36,10 +36,12 @@ PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packag
NETDATA_STABLE_BASE_URL="${NETDATA_BASE_URL:-https://github.com/netdata/netdata/releases}"
NETDATA_NIGHTLY_BASE_URL="${NETDATA_BASE_URL:-https://github.com/netdata/netdata-nightlies/releases}"
+NETDATA_DEFAULT_ACCEPT_MAJOR_VERSIONS="1 2"
# Following variables are intended to be overridden by the updater config file.
NETDATA_UPDATER_JITTER=3600
NETDATA_NO_SYSTEMD_JOURNAL=0
+NETDATA_ACCEPT_MAJOR_VERSIONS=''
script_dir="$(CDPATH='' cd -- "$(dirname -- "$0")" && pwd -P)"
@@ -147,6 +149,43 @@ issystemd() {
return 1
}
+# shellcheck disable=SC2009
+running_under_anacron() {
+ pid="${1:-$$}"
+ iter="${2:-0}"
+
+ [ "${iter}" -gt 50 ] && return 1
+
+ if [ "$(uname -s)" = "Linux" ] && [ -r "/proc/${pid}/stat" ]; then
+ ppid="$(cut -f 4 -d ' ' "/proc/${pid}/stat")"
+ if [ -n "${ppid}" ]; then
+ # The below case accounts for the hidepid mount option for procfs, as well as setups with LSM
+ [ ! -r "/proc/${ppid}/comm" ] && return 1
+
+ [ "${ppid}" -eq "${pid}" ] && return 1
+
+ grep -q anacron "/proc/${ppid}/comm" && return 0
+
+ running_under_anacron "${ppid}" "$((iter + 1))"
+
+ return "$?"
+ fi
+ else
+ ppid="$(ps -o pid= -o ppid= 2>/dev/null | grep -e "^ *${pid}" | xargs | cut -f 2 -d ' ')"
+ if [ -n "${ppid}" ]; then
+ [ "${ppid}" -eq "${pid}" ] && return 1
+
+ ps -o pid= -o command= 2>/dev/null | grep -e "^ *${ppid}" | grep -q anacron && return 0
+
+ running_under_anacron "${ppid}" "$((iter + 1))"
+
+ return "$?"
+ fi
+ fi
+
+ return 1
+}
+
_get_intervaldir() {
if [ -d /etc/cron.daily ]; then
echo /etc/cron.daily
@@ -171,6 +210,38 @@ _get_scheduler_type() {
fi
}
+confirm() {
+ prompt="${1} [y/n]"
+
+ while true; do
+ echo "${prompt}"
+ read -r yn
+
+ case "$yn" in
+ [Yy]*) return 0;;
+ [Nn]*) return 1;;
+ *) echo "Please answer yes or no.";;
+ esac
+ done
+}
+
+warn_major_update() {
+ nmv_suffix="New major versions generally involve breaking changes, and may not work in the same way as older versions."
+
+ if [ "${INTERACTIVE}" -eq 0 ]; then
+ warning "Would update to a new major version of Netdata. ${nmv_suffix}"
+ warning "To install the new major version anyway, either run the updater interactively, or include the new major version number in the NETDATA_ACCEPT_MAJOR_VERSIONS variable in ${UPDATER_CONFIG_PATH}."
+ fatal "Aborting update to new major version to avoid breaking things." U001B
+ else
+ warning "This update will install a new major version of Netdata. ${nmv_suffix}"
+ if confirm "Are you sure you want to update to a new major version of Netdata?"; then
+ notice "User accepted update to new major version of Netdata."
+ else
+ fatal "Aborting update to new major version at user request." U001C
+ fi
+ fi
+}
+
install_build_dependencies() {
bash="$(command -v bash 2> /dev/null)"
@@ -362,15 +433,42 @@ check_for_curl() {
_safe_download() {
url="${1}"
dest="${2}"
+ succeeded=0
+ checked=0
+
+ if echo "${url}" | grep -Eq "^file:///"; then
+ run cp "${url#file://}" "${dest}" || return 1
+ return 0
+ fi
check_for_curl
if [ -n "${curl}" ]; then
- "${curl}" -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
- return $?
- elif command -v wget > /dev/null 2>&1; then
- wget -T 15 -O - "${url}" > "${dest}"
- return $?
+ checked=1
+
+ if "${curl}" -fsSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"; then
+ succeeded=1
+ else
+ rm -f "${dest}"
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 0 ]; then
+ if command -v wget > /dev/null 2>&1; then
+ checked=1
+
+ if wget -T 15 -O - "${url}" > "${dest}"; then
+ succeeded=1
+ else
+ rm -f "${dest}"
+ fi
+ fi
+ fi
+
+ if [ "${succeeded}" -eq 1 ]; then
+ return 0
+ elif [ "${checked}" -eq 1 ]; then
+ return 1
else
return 255
fi
@@ -394,26 +492,58 @@ download() {
get_netdata_latest_tag() {
url="${1}/latest"
- dest="${2}"
check_for_curl
if [ -n "${curl}" ]; then
- tag=$("${curl}" "${url}" -s -L -I -o /dev/null -w '%{url_effective}' | grep -m 1 -o '[^/]*$')
- elif command -v wget >/dev/null 2>&1; then
- tag=$(wget -S -O /dev/null "${url}" 2>&1 | grep -m 1 Location | grep -o '[^/]*$')
- else
+ tag=$("${curl}" "${url}" -s -L -I -o /dev/null -w '%{url_effective}')
+ fi
+
+ if [ -z "${tag}" ]; then
+ if command -v wget >/dev/null 2>&1; then
+ tag=$(wget -S -O /dev/null "${url}" 2>&1 | grep Location)
+ fi
+ fi
+
+ if [ -z "${tag}" ]; then
fatal "I need curl or wget to proceed, but neither of them are available on this system." U0006
fi
- echo "${tag}" >"${dest}"
+ tag="$(echo "${tag}" | grep -Eom 1 '[^/]*/?$')"
+
+ # Fallback case for simpler local testing.
+ if echo "${tag}" | grep -Eq 'latest/?$'; then
+ if _safe_download "${url}/latest-version.txt" ./ndupdate-version.txt; then
+ tag="$(cat ./ndupdate-version.txt)"
+
+ if grep -q 'Not Found' ./ndupdate-version.txt; then
+ tag="latest"
+ fi
+
+ rm -f ./ndupdate-version.txt
+ else
+ tag="latest"
+ fi
+ fi
+
+ echo "${tag}"
}
newer_commit_date() {
info "Checking if a newer version of the updater script is available."
commit_check_url="https://api.github.com/repos/netdata/netdata/commits?path=packaging%2Finstaller%2Fnetdata-updater.sh&page=1&per_page=1"
- python_version_check="from __future__ import print_function;import sys,json;data = json.load(sys.stdin);print(data[0]['commit']['committer']['date'] if isinstance(data, list) else '')"
+ python_version_check="
+from __future__ import print_function
+import sys, json
+
+try:
+ data = json.load(sys.stdin)
+except:
+ print('')
+else:
+ print(data[0]['commit']['committer']['date'] if isinstance(data, list) and data else '')
+"
if command -v jq > /dev/null 2>&1; then
commit_date="$(_safe_download "${commit_check_url}" /dev/stdout | jq '.[0].commit.committer.date' 2>/dev/null | tr -d '"')"
@@ -494,9 +624,9 @@ parse_version() {
get_latest_version() {
if [ "${RELEASE_CHANNEL}" = "stable" ]; then
- get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}" /dev/stdout
+ get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}"
else
- get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}" /dev/stdout
+ get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}"
fi
}
@@ -513,6 +643,7 @@ update_available() {
info "Force update requested"
return 0
fi
+
basepath="$(dirname "$(dirname "$(dirname "${NETDATA_LIB_DIR}")")")"
searchpath="${basepath}/bin:${basepath}/sbin:${basepath}/usr/bin:${basepath}/usr/sbin:${PATH}"
searchpath="${basepath}/netdata/bin:${basepath}/netdata/sbin:${basepath}/netdata/usr/bin:${basepath}/netdata/usr/sbin:${searchpath}"
@@ -542,6 +673,27 @@ update_available() {
return 1
else
info "Update available"
+
+ if [ "${current_version}" -ne 0 ] && [ "${latest_version}" -ne 0 ]; then
+ current_major="$(${ndbinary} -v | cut -f 2 -d ' ' | cut -f 1 -d '.' | tr -d 'v')"
+ latest_major="$(echo "${latest_tag}" | cut -f 1 -d '.' | tr -d 'v')"
+
+ if [ "${current_major}" -ne "${latest_major}" ]; then
+ update_safe=0
+
+ for v in ${NETDATA_ACCEPT_MAJOR_VERSIONS}; do
+ if [ "${latest_major}" -eq "${v}" ]; then
+ update_safe=1
+ break
+ fi
+ done
+
+ if [ "${update_safe}" -eq 0 ]; then
+ warn_major_update
+ fi
+ fi
+ fi
+
return 0
fi
}
@@ -553,18 +705,23 @@ set_tarball_urls() {
if [ -e /opt/netdata/etc/netdata/.install-type ]; then
# shellcheck disable=SC1091
. /opt/netdata/etc/netdata/.install-type
+ [ -z "${PREBUILT_ARCH:-}" ] && PREBUILT_ARCH="$(uname -m)"
filename="netdata-${PREBUILT_ARCH}-latest.gz.run"
else
filename="netdata-x86_64-latest.gz.run"
fi
fi
- if [ "$1" = "stable" ]; then
- latest="$(get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}" /dev/stdout)"
+ if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ path="$(cd "${NETDATA_OFFLINE_INSTALL_SOURCE}" || exit 1; pwd)"
+ export NETDATA_TARBALL_URL="file://${path}/${filename}"
+ export NETDATA_TARBALL_CHECKSUM_URL="file://${path}/sha256sums.txt"
+ elif [ "$1" = "stable" ]; then
+ latest="$(get_netdata_latest_tag "${NETDATA_STABLE_BASE_URL}")"
export NETDATA_TARBALL_URL="${NETDATA_STABLE_BASE_URL}/download/$latest/${filename}"
export NETDATA_TARBALL_CHECKSUM_URL="${NETDATA_STABLE_BASE_URL}/download/$latest/sha256sums.txt"
else
- tag="$(get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}" /dev/stdout)"
+ tag="$(get_netdata_latest_tag "${NETDATA_NIGHTLY_BASE_URL}")"
export NETDATA_TARBALL_URL="${NETDATA_NIGHTLY_BASE_URL}/download/${tag}/${filename}"
export NETDATA_TARBALL_CHECKSUM_URL="${NETDATA_NIGHTLY_BASE_URL}/download/${tag}/sha256sums.txt"
fi
@@ -713,6 +870,15 @@ update_static() {
exit 0
}
+get_new_binpkg_major() {
+ case "${pm_cmd}" in
+ apt-get) apt-get --just-print upgrade 2>&1 | grep Inst | grep ' netdata ' | cut -f 3 -d ' ' | tr -d '[]' | cut -f 1 -d '.' ;;
+ yum) yum check-update netdata | grep -E '^netdata ' | awk '{print $2}' | cut -f 1 -d '.' ;;
+ dnf) dnf check-update netdata | grep -E '^netdata ' | awk '{print $2}' | cut -f 1 -d '.' ;;
+ zypper) zypper list-updates | grep '| netdata |' | cut -f 5 -d '|' | tr -d ' ' | cut -f 1 -d '.' ;;
+ esac
+}
+
update_binpkg() {
os_release_file=
if [ -s "/etc/os-release" ] && [ -r "/etc/os-release" ]; then
@@ -823,6 +989,24 @@ update_binpkg() {
fi
done
+ current_major="$(netdata -v | cut -f 2 -d ' ' | cut -f 1 -d '.' | tr -d 'v')"
+ latest_major="$(get_new_binpkg_major)"
+
+ if [ -n "${latest_major}" ] && [ "${latest_major}" -ne "${current_major}" ]; then
+ update_safe=0
+
+ for v in ${NETDATA_ACCEPT_MAJOR_VERSIONS}; do
+ if [ "${latest_major}" -eq "${v}" ]; then
+ update_safe=1
+ break
+ fi
+ done
+
+ if [ "${update_safe}" -eq 0 ]; then
+ warn_major_update
+ fi
+ fi
+
# shellcheck disable=SC2086
env ${env} ${pm_cmd} ${upgrade_subcmd} ${pkg_install_opts} netdata >&3 2>&3 || fatal "Failed to update Netdata package." U000F
@@ -901,11 +1085,14 @@ if [ -r "$(dirname "${ENVIRONMENT_FILE}")/.install-type" ]; then
. "$(dirname "${ENVIRONMENT_FILE}")/.install-type" || fatal "Failed to source $(dirname "${ENVIRONMENT_FILE}")/.install-type" U0015
fi
-if [ -r "$(dirname "${ENVIRONMENT_FILE}")/netdata-updater.conf" ]; then
+UPDATER_CONFIG_PATH="$(dirname "${ENVIRONMENT_FILE}")/netdata-updater.conf"
+if [ -r "${UPDATER_CONFIG_PATH}" ]; then
# shellcheck source=/dev/null
- . "$(dirname "${ENVIRONMENT_FILE}")/netdata-updater.conf"
+ . "${UPDATER_CONFIG_PATH}"
fi
+[ -z "${NETDATA_ACCEPT_MAJOR_VERSIONS}" ] && NETDATA_ACCEPT_MAJOR_VERSIONS="${NETDATA_DEFAULT_ACCEPT_MAJOR_VERSIONS}"
+
while [ -n "${1}" ]; do
case "${1}" in
--not-running-from-cron) NETDATA_NOT_RUNNING_FROM_CRON=1 ;;
@@ -913,6 +1100,10 @@ while [ -n "${1}" ]; do
--force-update) NETDATA_FORCE_UPDATE=1 ;;
--non-interactive) INTERACTIVE=0 ;;
--interactive) INTERACTIVE=1 ;;
+ --offline-install-source)
+ NETDATA_OFFLINE_INSTALL_SOURCE="${2}"
+ shift 1
+ ;;
--tmpdir-path)
NETDATA_TMPDIR_PATH="${2}"
shift 1
@@ -931,6 +1122,18 @@ while [ -n "${1}" ]; do
shift 1
done
+if [ -n "${NETDATA_OFFLINE_INSTALL_SOURCE}" ]; then
+ NETDATA_NO_UPDATER_SELF_UPDATE=1
+ NETDATA_UPDATER_JITTER=0
+ NETDATA_FORCE_UPDATE=1
+fi
+
+# If we seem to be running under anacron, act as if we’re not running from cron.
+# This is mostly to disable jitter, which should not be needed when run from anacron.
+if running_under_anacron; then
+ NETDATA_NOT_RUNNING_FROM_CRON="${NETDATA_NOT_RUNNING_FROM_CRON:-1}"
+fi
+
# Random sleep to alleviate stampede effect of Agents upgrading
# and disconnecting/reconnecting at the same time (or near to).
# But only if we're not attached to a controlling terminal (tty)
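
As a usage sketch of the pieces added above: the major-version gate reads NETDATA_ACCEPT_MAJOR_VERSIONS from the updater config, and the new offline mode is driven by a single flag. Paths and version numbers below are illustrative only.

  # In netdata-updater.conf (the file reported as ${UPDATER_CONFIG_PATH} in the warnings above):
  # allow unattended updates into the listed major versions.
  NETDATA_ACCEPT_MAJOR_VERSIONS="1 2"

  # One-shot update from a local mirror holding the .gz.run file and sha256sums.txt:
  ./netdata-updater.sh --non-interactive --offline-install-source /opt/netdata-offline
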
diff --git a/packaging/installer/package-windows.sh b/packaging/installer/package-windows.sh
new file mode 100755
index 000000000..7b1c57e46
--- /dev/null
+++ b/packaging/installer/package-windows.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+repo_root="$(dirname "$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd -P)")")"
+
+if [ -n "${BUILD_DIR}" ]; then
+ build="${BUILD_DIR}"
+elif [ -n "${OSTYPE}" ]; then
+ if [ -n "${MSYSTEM}" ]; then
+ build="${repo_root}/build-${OSTYPE}-${MSYSTEM}"
+ else
+ build="${repo_root}/build-${OSTYPE}"
+ fi
+elif [ "$USER" = "vk" ]; then
+ build="${repo_root}/build"
+else
+ build="${repo_root}/build"
+fi
+
+set -exu -o pipefail
+
+${GITHUB_ACTIONS+echo "::group::Installing"}
+cmake --install "${build}"
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+if [ ! -f "/msys2-installer.exe" ]; then
+ ${GITHUB_ACTIONS+echo "::group::Fetching MSYS2 installer"}
+ "${repo_root}/packaging/windows/fetch-msys2-installer.py" /msys2-installer.exe
+ ${GITHUB_ACTIONS+echo "::endgroup::"}
+fi
+
+${GITHUB_ACTIONS+echo "::group::Packaging"}
+NDVERSION=$"$(grep 'CMAKE_PROJECT_VERSION:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+NDMAJORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MAJOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+NDMINORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MINOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+
+if [ -f "/gpl-3.0.txt" ]; then
+ ${GITHUB_ACTIONS+echo "::group::Fetching GPL3 License"}
+ curl -o /gpl-3.0.txt "https://www.gnu.org/licenses/gpl-3.0.txt"
+ ${GITHUB_ACTIONS+echo "::endgroup::"}
+fi
+
+/mingw64/bin/makensis.exe -DCURRVERSION="${NDVERSION}" -DMAJORVERSION="${NDMAJORVERSION}" -DMINORVERSION="${NDMINORVERSION}" "${repo_root}/packaging/windows/installer.nsi"
+${GITHUB_ACTIONS+echo "::endgroup::"}
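
A hypothetical invocation of the new packaging script, assuming an MSYS2 shell with an already-configured CMake build tree and NSIS installed under /mingw64 (the path is an example):

  BUILD_DIR=/c/netdata/build ./packaging/installer/package-windows.sh
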
diff --git a/packaging/jsonc.checksums b/packaging/jsonc.checksums
deleted file mode 100644
index 6005f455c..000000000
--- a/packaging/jsonc.checksums
+++ /dev/null
@@ -1 +0,0 @@
-ec4eb70e0f6c0d707b9b1ec646cf7c860f4abb3562a90ea6e4d78d177fd95303 json-c-0.14-20200419.tar.gz
diff --git a/packaging/jsonc.version b/packaging/jsonc.version
deleted file mode 100644
index 29b561991..000000000
--- a/packaging/jsonc.version
+++ /dev/null
@@ -1 +0,0 @@
-0.14-20200419
diff --git a/packaging/libbpf_0_0_9.checksums b/packaging/libbpf_0_0_9.checksums
deleted file mode 100644
index d4ff87a12..000000000
--- a/packaging/libbpf_0_0_9.checksums
+++ /dev/null
@@ -1 +0,0 @@
-fc33402ba33c8f8c5aa18afbb86a9932965886f2906c50e8f2110a1a2126e3ee v0.0.9_netdata-1.tar.gz
diff --git a/packaging/libbpf_0_0_9.version b/packaging/libbpf_0_0_9.version
deleted file mode 100644
index d2362909d..000000000
--- a/packaging/libbpf_0_0_9.version
+++ /dev/null
@@ -1 +0,0 @@
-0.0.9_netdata-1
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
index 3d759ecf0..612c7d0d7 100644
--- a/packaging/maintainers/README.md
+++ b/packaging/maintainers/README.md
@@ -33,7 +33,7 @@ This page tracks the package maintainers for Netdata, for various operating syst
| System | URL | Core Developer | Package Maintainer
|:-:|:-:|:-:|:-:|
-| macOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
+| macOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb)|@vlvkobal|@rickard-von-essen
---
@@ -48,7 +48,6 @@ This page tracks the package maintainers for Netdata, for various operating syst
| Embedded Linux | Netdata Version | Maintainer | Related URL |
| :-: | :-: | :-: | :-- |
-| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 |
| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) |
| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata |
| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 |
diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md
index 1f2c746bf..d1c492f62 100644
--- a/packaging/makeself/README.md
+++ b/packaging/makeself/README.md
@@ -26,7 +26,7 @@ you can do so by adding `--static-only` to the options you pass to the installer
## Building a static binary package
Before you begin, make sure that your repo and the repo's submodules are clean from any previous builds and up to date.
-Otherwise, [perform a cleanup](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo)
+Otherwise, [perform a cleanup](/packaging/installer/methods/manual.md#perform-a-cleanup-in-your-netdata-repo)
To build the static binary 64-bit distribution package, change into the root folder of the netdata repo and run:
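
(The exact command sits outside this hunk; judging from build-static.sh below, which takes the target architecture as its argument, it would look roughly like the following, treat it as an assumption rather than the canonical command.)

  ./packaging/makeself/build-static.sh x86_64
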
diff --git a/packaging/makeself/build-static.sh b/packaging/makeself/build-static.sh
index 0c46c12af..7161cfcda 100755
--- a/packaging/makeself/build-static.sh
+++ b/packaging/makeself/build-static.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -26,8 +26,13 @@ fi
DOCKER_IMAGE_NAME="netdata/static-builder:v1"
-if [ "${BUILDARCH}" != "$(uname -m)" ] && [ "$(uname -m)" = 'x86_64' ] && [ -z "${SKIP_EMULATION}" ]; then
- ${docker} run --rm --privileged multiarch/qemu-user-static --reset -p yes || exit 1
+if [ "${BUILDARCH}" != "$(uname -m)" ] && [ -z "${SKIP_EMULATION}" ]; then
+ if [ "$(uname -m)" = "x86_64" ]; then
+ ${docker} run --rm --privileged multiarch/qemu-user-static --reset -p yes || exit 1
+ else
+ echo "Automatic cross-architecture builds are only supported on x86_64 hosts."
+ exit 1
+ fi
fi
if ${docker} inspect "${DOCKER_IMAGE_NAME}" > /dev/null 2>&1; then
@@ -49,10 +54,11 @@ fi
# Run the build script inside the container
if [ -t 1 ]; then
run ${docker} run --rm -e BUILDARCH="${BUILDARCH}" -a stdin -a stdout -a stderr -i -t -v "$(pwd)":/netdata:rw \
- "${DOCKER_IMAGE_NAME}" \
- /bin/sh /netdata/packaging/makeself/build.sh "${@}"
+ --platform "${platform}" ${EXTRA_INSTALL_FLAGS:+-e EXTRA_INSTALL_FLAGS="${EXTRA_INSTALL_FLAGS}"} \
+ "${DOCKER_IMAGE_NAME}" /bin/sh /netdata/packaging/makeself/build.sh "${@}"
else
run ${docker} run --rm -e BUILDARCH="${BUILDARCH}" -v "$(pwd)":/netdata:rw \
- -e GITHUB_ACTIONS="${GITHUB_ACTIONS}" "${DOCKER_IMAGE_NAME}" \
- /bin/sh /netdata/packaging/makeself/build.sh "${@}"
+ -e GITHUB_ACTIONS="${GITHUB_ACTIONS}" --platform "${platform}" \
+ ${EXTRA_INSTALL_FLAGS:+-e EXTRA_INSTALL_FLAGS="${EXTRA_INSTALL_FLAGS}"} \
+ "${DOCKER_IMAGE_NAME}" /bin/sh /netdata/packaging/makeself/build.sh "${@}"
fi
diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh
index 3ac600ed4..dca635f0a 100755
--- a/packaging/makeself/build.sh
+++ b/packaging/makeself/build.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-3.0-or-later
# -----------------------------------------------------------------------------
@@ -33,9 +33,6 @@ chown -R root:root /usr/src/netdata
cd /usr/src/netdata/packaging/makeself || exit 1
-git clean -dxf
-git submodule foreach --recursive git clean -dxf
-
cat >&2 << EOF
This program will create a self-extracting shell package containing
a statically linked netdata, able to run on any 64bit Linux system,
diff --git a/packaging/makeself/bundled-packages b/packaging/makeself/bundled-packages.version
index 02ee4469d..02ee4469d 100644
--- a/packaging/makeself/bundled-packages
+++ b/packaging/makeself/bundled-packages.version
diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh
index c3289c7cd..4057fbee0 100755
--- a/packaging/makeself/functions.sh
+++ b/packaging/makeself/functions.sh
@@ -52,7 +52,7 @@ fetch() {
# Check SHA256 of gzip'd tar file (apparently alpine's sha256sum requires
# two empty spaces between the checksum and the file's path)
set +e
- echo "${sha256} ${NETDATA_MAKESELF_PATH}/tmp/${tar}" | sha256sum -c -s
+    echo "${sha256} ${NETDATA_MAKESELF_PATH}/tmp/${tar}" | sha256sum -c --status
local rc=$?
if [ ${rc} -ne 0 ]; then
echo >&2 "SHA256 verification of tar file ${tar} failed (rc=${rc})"
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
index e4c133459..d2350a790 100755
--- a/packaging/makeself/install-or-update.sh
+++ b/packaging/makeself/install-or-update.sh
@@ -27,6 +27,8 @@ fi
STARTIT=1
REINSTALL_OPTIONS=""
+NETDATA_CERT_MODE="${NETDATA_CERT_MODE:-auto}"
+NETDATA_CERT_TEST_URL="${NETDATA_CERT_TEST_URL:-https://app.netdata.cloud}"
RELEASE_CHANNEL="nightly"
while [ "${1}" ]; do
@@ -48,6 +50,19 @@ while [ "${1}" ]; do
NETDATA_DISABLE_TELEMETRY=1
REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
;;
+ "--certificates")
+ case "${2}" in
+ auto|system) NETDATA_CERT_MODE="auto" ;;
+ check) NETDATA_CERT_MODE="check" ;;
+ bundled) NETDATA_CERT_MODE="bundled" ;;
+ *) run_failed "Unknown certificate handling mode '${2}'. Supported modes are auto, check, system, and bundled."; exit 1 ;;
+ esac
+ shift 1
+ ;;
+ "--certificate-test-url")
+ NETDATA_CERT_TEST_URL="${2}"
+ shift 1
+ ;;
*) echo >&2 "Unknown option '${1}'. Ignoring it." ;;
esac
@@ -62,6 +77,14 @@ if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --disable-telemetry"
fi
+if [ -n "${NETDATA_CERT_MODE}" ]; then
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --certificates ${NETDATA_CERT_MODE}"
+fi
+
+if [ -n "${NETDATA_CERT_TEST_URL}" ]; then
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --certificate-test-url ${NETDATA_CERT_TEST_URL}"
+fi
+
# -----------------------------------------------------------------------------
progress "Attempt to create user/group netdata/netadata"
@@ -101,6 +124,10 @@ progress "Install logrotate configuration for netdata"
install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
+progress "Install journald configuration for netdata"
+
+install_netdata_journald_conf || run_failed "Cannot install journald file for netdata."
+
# -----------------------------------------------------------------------------
progress "Telemetry configuration"
@@ -172,7 +199,7 @@ fi
progress "changing plugins ownership and permissions"
-for x in apps.plugin perf.plugin slabinfo.plugin debugfs.plugin freeipmi.plugin ioping cgroup-network local-listeners ebpf.plugin nfacct.plugin xenstat.plugin python.d.plugin charts.d.plugin go.d.plugin ioping.plugin cgroup-network-helper.sh; do
+for x in ndsudo apps.plugin perf.plugin slabinfo.plugin debugfs.plugin freeipmi.plugin ioping cgroup-network local-listeners network-viewer.plugin ebpf.plugin nfacct.plugin xenstat.plugin python.d.plugin charts.d.plugin go.d.plugin ioping.plugin cgroup-network-helper.sh; do
f="usr/libexec/netdata/plugins.d/${x}"
if [ -f "${f}" ]; then
run chown root:${NETDATA_GROUP} "${f}"
@@ -190,7 +217,7 @@ if command -v setcap >/dev/null 2>&1; then
run setcap "cap_sys_admin=ep" "usr/libexec/netdata/plugins.d/perf.plugin"
fi
- run setcap "cap_net_admin,cap_net_raw=eip" "usr/libexec/netdata/plugins.d/go.d.plugin"
+ run setcap "cap_dac_read_search+epi cap_net_admin+epi cap_net_raw=eip" "usr/libexec/netdata/plugins.d/go.d.plugin"
else
for x in apps.plugin perf.plugin slabinfo.plugin debugfs.plugin; do
f="usr/libexec/netdata/plugins.d/${x}"
@@ -198,7 +225,7 @@ else
done
fi
-for x in freeipmi.plugin ioping cgroup-network local-listeners ebpf.plugin nfacct.plugin xenstat.plugin; do
+for x in ndsudo freeipmi.plugin ioping cgroup-network local-listeners network-viewer.plugin ebpf.plugin nfacct.plugin xenstat.plugin; do
f="usr/libexec/netdata/plugins.d/${x}"
if [ -f "${f}" ]; then
@@ -208,26 +235,73 @@ done
# -----------------------------------------------------------------------------
-echo "Configure TLS certificate paths"
-if [ ! -L /opt/netdata/etc/ssl ] && [ -d /opt/netdata/etc/ssl ] ; then
- echo "Preserving existing user configuration for TLS"
-else
+replace_symlink() {
+ target="${1}"
+ name="${2}"
+ rm -f "${name}"
+ ln -s "${target}" "${name}"
+}
+
+select_system_certs() {
if [ -d /etc/pki/tls ] ; then
- echo "Using /etc/pki/tls for TLS configuration and certificates"
- ln -sf /etc/pki/tls /opt/netdata/etc/ssl
+ echo "${1} /etc/pki/tls for TLS configuration and certificates"
+ replace_symlink /etc/pki/tls /opt/netdata/etc/ssl
elif [ -d /etc/ssl ] ; then
- echo "Using /etc/ssl for TLS configuration and certificates"
- ln -sf /etc/ssl /opt/netdata/etc/ssl
- else
- echo "Using bundled TLS configuration and certificates"
- ln -sf /opt/netdata/share/ssl /opt/netdata/etc/ssl
+ echo "${1} /etc/ssl for TLS configuration and certificates"
+ replace_symlink /etc/ssl /opt/netdata/etc/ssl
fi
+}
+
+select_internal_certs() {
+ echo "Using bundled TLS configuration and certificates"
+ replace_symlink /opt/netdata/share/ssl /opt/netdata/etc/ssl
+}
+
+certs_selected() {
+ [ -L /opt/netdata/etc/ssl ] || return 1
+}
+
+test_certs() {
+ /opt/netdata/bin/curl --fail --max-time 300 --silent --output /dev/null "${NETDATA_CERT_TEST_URL}"
+
+ case "$?" in
+ 35|77) echo "Failed to load certificate files for test." ; return 1 ;;
+ 60|82|83) echo "Certificates cannot be used to connect to ${NETDATA_CERT_TEST_URL}" ; return 1 ;;
+ 53|54|66) echo "Unable to use OpenSSL configuration associated with certificates" ; return 1 ;;
+ 0) echo "Successfully connected to ${NETDATA_CERT_TEST_URL} using certificates" ;;
+ *) echo "Unable to test certificates due to networking problems, blindly assuming they work" ;;
+ esac
+}
+
+# If the user has manually set up certificates, don’t mess with it.
+if [ ! -L /opt/netdata/etc/ssl ] && [ -d /opt/netdata/etc/ssl ] ; then
+ echo "Preserving existing user configuration for TLS"
+else
+ echo "Configure TLS certificate paths (mode: ${NETDATA_CERT_MODE})"
+ case "${NETDATA_CERT_MODE}" in
+ check)
+ select_system_certs "Testing"
+ if certs_selected && test_certs; then
+ select_system_certs "Using"
+ else
+ select_internal_certs
+ fi
+ ;;
+ bundled) select_internal_certs ;;
+ *)
+ select_system_certs "Using"
+ if ! certs_selected; then
+ select_internal_certs
+ fi
+ ;;
+ esac
fi
# -----------------------------------------------------------------------------
echo "Save install options"
grep -qv 'IS_NETDATA_STATIC_BINARY="yes"' "${NETDATA_PREFIX}/etc/netdata/.environment" || echo IS_NETDATA_STATIC_BINARY=\"yes\" >> "${NETDATA_PREFIX}/etc/netdata/.environment"
+REINSTALL_OPTIONS="$(echo "${REINSTALL_OPTIONS}" | awk '{gsub("/", "\\/"); print}')"
sed -i "s/REINSTALL_OPTIONS=\".*\"/REINSTALL_OPTIONS=\"${REINSTALL_OPTIONS}\"/" "${NETDATA_PREFIX}/etc/netdata/.environment"
# -----------------------------------------------------------------------------
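
The new options can be passed straight through the self-extracting archive; makeself forwards anything after "--" to install-or-update.sh. An illustrative run that forces the bundled CA store instead of probing the system one:

  sh ./netdata-x86_64-latest.gz.run -- --certificates bundled
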
diff --git a/packaging/makeself/jobs/20-openssl.install.sh b/packaging/makeself/jobs/20-openssl.install.sh
index 1158a6330..a07f9c947 100755
--- a/packaging/makeself/jobs/20-openssl.install.sh
+++ b/packaging/makeself/jobs/20-openssl.install.sh
@@ -4,7 +4,7 @@
# shellcheck source=packaging/makeself/functions.sh
. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
# Source of truth for all the packages we bundle in static builds
-. "$(dirname "${0}")/../bundled-packages"
+. "$(dirname "${0}")/../bundled-packages.version"
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building OpenSSL" || true
diff --git a/packaging/makeself/jobs/50-bash-5.1.16.install.sh b/packaging/makeself/jobs/50-bash-5.1.16.install.sh
index 7a302f2ee..cc74d0fc8 100755
--- a/packaging/makeself/jobs/50-bash-5.1.16.install.sh
+++ b/packaging/makeself/jobs/50-bash-5.1.16.install.sh
@@ -4,7 +4,7 @@
# shellcheck source=packaging/makeself/functions.sh
. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
# Source of truth for all the packages we bundle in static builds
-. "$(dirname "${0}")/../bundled-packages"
+. "$(dirname "${0}")/../bundled-packages.version"
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::building bash" || true
diff --git a/packaging/makeself/jobs/50-curl.install.sh b/packaging/makeself/jobs/50-curl.install.sh
index 824b30562..54f55480f 100755
--- a/packaging/makeself/jobs/50-curl.install.sh
+++ b/packaging/makeself/jobs/50-curl.install.sh
@@ -4,7 +4,7 @@
# shellcheck source=packaging/makeself/functions.sh
. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
# Source of truth for all the packages we bundle in static builds
-. "$(dirname "${0}")/../bundled-packages"
+. "$(dirname "${0}")/../bundled-packages.version"
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building cURL" || true
diff --git a/packaging/makeself/jobs/50-ioping-1.3.install.sh b/packaging/makeself/jobs/50-ioping-1.3.install.sh
index 6bd538e35..de6cb3241 100755
--- a/packaging/makeself/jobs/50-ioping-1.3.install.sh
+++ b/packaging/makeself/jobs/50-ioping-1.3.install.sh
@@ -4,7 +4,7 @@
# shellcheck source=packaging/makeself/functions.sh
. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
# Source of truth for all the packages we bundle in static builds
-. "$(dirname "${0}")/../bundled-packages" || exit 1
+. "$(dirname "${0}")/../bundled-packages.version" || exit 1
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building ioping" || true
diff --git a/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh b/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh
index 829752178..efde6976f 100755
--- a/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh
+++ b/packaging/makeself/jobs/50-libnetfilter_acct-1.0.3.install.sh
@@ -7,7 +7,7 @@
# shellcheck source=packaging/makeself/functions.sh
. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
# Source of truth for all the packages we bundle in static builds
-. "$(dirname "${0}")/../bundled-packages" || exit 1
+. "$(dirname "${0}")/../bundled-packages.version" || exit 1
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::building libnetfilter_acct" || true
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
index 83d28bf87..59074ec59 100755
--- a/packaging/makeself/jobs/70-netdata-git.install.sh
+++ b/packaging/makeself/jobs/70-netdata-git.install.sh
@@ -36,7 +36,8 @@ run ./netdata-installer.sh \
--use-system-protobuf \
--dont-scrub-cflags-even-though-it-may-break-things \
--one-time-build \
- --enable-lto
+ --enable-lto \
+ ${EXTRA_INSTALL_FLAGS:+${EXTRA_INSTALL_FLAGS}} \
# shellcheck disable=SC2015
[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Finishing netdata install" || true
diff --git a/packaging/makeself/jobs/90-netdata-runtime-check.sh b/packaging/makeself/jobs/90-netdata-runtime-check.sh
index a3c94ffcb..38ebc4c87 100755
--- a/packaging/makeself/jobs/90-netdata-runtime-check.sh
+++ b/packaging/makeself/jobs/90-netdata-runtime-check.sh
@@ -8,47 +8,17 @@ dump_log() {
cat ./netdata.log
}
-wait_for() {
- host="${1}"
- port="${2}"
- name="${3}"
- timeout="30"
-
- if command -v nc > /dev/null ; then
- netcat="nc"
- elif command -v netcat > /dev/null ; then
- netcat="netcat"
- else
- printf "Unable to find a usable netcat command.\n"
- return 1
- fi
-
- printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}"
-
- sleep 30
-
- i=0
- while ! ${netcat} -z "${host}" "${port}"; do
- sleep 1
- if [ "$i" -gt "$timeout" ]; then
- printf "Timed out!\n"
- return 1
- fi
- i="$((i + 1))"
- done
- printf "OK\n"
-}
-
trap dump_log EXIT
-"${NETDATA_INSTALL_PATH}/bin/netdata" -D > ./netdata.log 2>&1 &
+export NETDATA_LIBEXEC_PREFIX="${NETDATA_INSTALL_PATH}/usr/libexec/netdata"
+export NETDATA_SKIP_LIBEXEC_PARTS="freeipmi|xenstat|cups"
-wait_for localhost 19999 netdata || exit 1
+if [ "$(uname -m)" != "x86_64" ]; then
+ export NETDATA_SKIP_LIBEXEC_PARTS="${NETDATA_SKIP_LIBEXEC_PARTS}|ebpf"
+fi
-curl -sS http://127.0.0.1:19999/api/v1/info > ./response || exit 1
-
-cat ./response
+"${NETDATA_INSTALL_PATH}/bin/netdata" -D > ./netdata.log 2>&1 &
-jq '.version' ./response || exit 1
+"${NETDATA_SOURCE_PATH}/packaging/runtime-check.sh" || exit 1
trap - EXIT
diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh
index dd123c218..e9b4327bf 100755
--- a/packaging/makeself/run-all-jobs.sh
+++ b/packaging/makeself/run-all-jobs.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -e
diff --git a/packaging/makeself/uname2platform.sh b/packaging/makeself/uname2platform.sh
index 7eab706ec..34d76ff9f 100755
--- a/packaging/makeself/uname2platform.sh
+++ b/packaging/makeself/uname2platform.sh
@@ -8,6 +8,7 @@ BUILDARCH="${1}"
case "${BUILDARCH}" in
x86_64) echo "linux/amd64" ;;
+ armv6l) echo "linux/arm/v6" ;;
armv7l) echo "linux/arm/v7" ;;
aarch64) echo "linux/arm64/v8" ;;
ppc64le) echo "linux/ppc64le" ;;
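
With the new armv6l entry, the mapping can be exercised directly, e.g.:

  ./packaging/makeself/uname2platform.sh armv6l   # prints: linux/arm/v6
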
diff --git a/packaging/protobuf.checksums b/packaging/protobuf.checksums
deleted file mode 100644
index 4a025c5fb..000000000
--- a/packaging/protobuf.checksums
+++ /dev/null
@@ -1 +0,0 @@
-89ac31a93832e204db6d73b1e80f39f142d5747b290f17340adce5be5b122f94 protobuf-cpp-3.19.4.tar.gz
diff --git a/packaging/protobuf.version b/packaging/protobuf.version
deleted file mode 100644
index de24deecf..000000000
--- a/packaging/protobuf.version
+++ /dev/null
@@ -1 +0,0 @@
-3.19.4
diff --git a/packaging/repoconfig/CMakeLists.txt b/packaging/repoconfig/CMakeLists.txt
new file mode 100644
index 000000000..415ad8807
--- /dev/null
+++ b/packaging/repoconfig/CMakeLists.txt
@@ -0,0 +1,250 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cmake_minimum_required(VERSION 3.16.0...3.30)
+
+list(APPEND RHEL_DISTROS centos centos-stream rocky almalinux cloudlinux)
+list(APPEND SUSE_DISTROS opensuse-leap opensuse-tumbleweed)
+list(APPEND RPM_DISTROS rhel opensuse ol amzn fedora)
+list(APPEND DEB_DISTROS debian ubuntu)
+
+set(DEB_GPG_KEY_SOURCE "https://repo.netdata.cloud/netdatabot.gpg.key")
+
+set(PACKAGE_VERSION 3)
+set(PACKAGE_RELEASE 4)
+
+set(CPACK_THREADS 0)
+set(CPACK_STRIP_FILES NO)
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "netdata")
+set(CPACK_PACKAGE_DIRECTORY "${CMAKE_BINARY_DIR}/packages")
+set(CPACK_PACKAGING_INSTALL_PREFIX "/")
+set(CPACK_PACKAGE_VENDOR "Netdata Inc.")
+set(CPACK_COMPONENT_NETDATA-REPO_PACKAGE_DESCRIPTION "Configuration for the official Netdata Stable package repository.")
+set(CPACK_COMPONENT_NETDATA-REPO-EDGE_PACKAGE_DESCRIPTION "Configuration for the official Netdata Edge package repository.")
+
+project(netdata-repoconfig VERSION "${PACKAGE_VERSION}.${PACKAGE_RELEASE}"
+ DESCRIPTION "Repository configuration for Netdata’s official native packages."
+ HOMEPAGE_URL "https://www.netdata.cloud/"
+ LANGUAGES NONE)
+
+function(extract_release_item _variable _item)
+ if(DEFINED "${_variable}")
+ return()
+ endif()
+
+ if(DEFINED OS_RELEASE_FILE)
+ else()
+ message(CHECK_START "Searching for os-release file")
+ find_file(OS_RELEASE_FILE os-release PATHS /etc /lib /usr/lib NO_DEFAULT_PATH)
+ if(${OS_RELEASE_FILE} STREQUAL "OS_RELEASE_FILE-NOTFOUND")
+ message(CHECK_FAIL "failed")
+ message(FATAL_ERROR "Could not find os-release file")
+ endif()
+
+ message(CHECK_PASS "${OS_RELEASE_FILE}")
+ endif()
+
+ message(CHECK_START "Extracting ${_item} from ${OS_RELEASE_FILE}")
+ execute_process(COMMAND sh -c ". ${OS_RELEASE_FILE} && printf %s $${_item}"
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _output)
+
+ if(NOT ${_result} EQUAL 0)
+ message(CHECK_FAIL "failed to parse ${OS_RELEASE_FILE}")
+ return()
+ elseif(${_output} STREQUAL "")
+ message(CHECK_FAIL "variable ${_item} not defined in ${OS_RELEASE_FILE}")
+ return()
+ endif()
+
+ message(CHECK_PASS ${_output})
+ set(${_variable} ${_output} PARENT_SCOPE)
+endfunction()
+
+function(require_command _variable _cmd)
+ if(DEFINED ${_variable})
+ return()
+ endif()
+
+ message(CHECK_START "Looking for ${_cmd}")
+
+ find_program(_result_${_cmd} ${_cmd})
+
+ if(${_result_${_cmd}} STREQUAL "_result_${_cmd}-NOTFOUND")
+ message(CHECK_FAIL "failed")
+ message(FATAL_ERROR "Unable to find required command: ${_cmd}")
+ endif()
+
+ message(CHECK_PASS "${_result_${_cmd}}")
+ set(${_variable} ${_result_${_cmd}} PARENT_SCOPE)
+endfunction()
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ extract_release_item("DISTRO" ID)
+
+ if(NOT DEFINED DISTRO)
+ message(FATAL_ERROR "Failed to auto-detect distro ID")
+ endif()
+
+ extract_release_item(DISTRO_VERSION VERSION_ID)
+
+ if(NOT DEFINED DISTRO_VERSION)
+ message(FATAL_ERROR "Failed to auto-detect distro version ID.")
+ endif()
+else()
+ message(FATAL_ERROR "Repository configuration packages can only be built on Linux")
+endif()
+
+if(${DISTRO} IN_LIST RHEL_DISTROS)
+ set(DISTRO "rhel")
+elseif(${DISTRO} STREQUAL "opensuse-leap")
+ set(DISTRO "opensuse")
+elseif(${DISTRO} STREQUAL "opensuse-tumbleweed")
+ set(DISTRO "opensuse")
+ set(DISTRO_VERSION "tumbleweed")
+endif()
+
+if(${DISTRO} IN_LIST DEB_DISTROS)
+ extract_release_item(SUITE VERSION_CODENAME)
+
+ if(NOT DEFINED SUITE)
+ message(FATAL_ERROR "Failed to determine version codename")
+ endif()
+
+ require_command(DPKG dpkg)
+ require_command(CURL curl)
+ require_command(GPG gpg)
+
+ set(DIST_NAME ${DISTRO})
+ message(STATUS "Generating stable repository configuration for ${DISTRO} ${SUITE}")
+ set(VARIANT stable)
+ configure_file(netdata.sources.in netdata.sources @ONLY)
+ message(STATUS "Generating edge repository configuration for ${DISTRO} ${SUITE}")
+ set(VARIANT edge)
+ configure_file(netdata.sources.in netdata-edge.sources @ONLY)
+ message(STATUS "Preparing changelogs")
+ set(PKG_NAME netdata-repo)
+ file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/netdata-repo)
+ configure_file(deb.changelog netdata-repo/changelog @ONLY)
+ set(PKG_NAME netdata-repo-edge)
+ file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/netdata-repo-edge)
+ configure_file(deb.changelog netdata-repo-edge/changelog @ONLY)
+
+ install(FILES ${CMAKE_BINARY_DIR}/netdata.sources
+ DESTINATION etc/apt/sources.list.d
+ COMPONENT netdata-repo)
+ install(FILES ${CMAKE_BINARY_DIR}/netdata-edge.sources
+ DESTINATION etc/apt/sources.list.d
+ COMPONENT netdata-repo-edge)
+
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/netdatabot.gpg.key
+ COMMENT "Fetch GPG key from ${DEB_GPG_KEY_SOURCE}"
+ COMMAND ${CURL} -f -L -o ${CMAKE_BINARY_DIR}/netdatabot.gpg.key ${DEB_GPG_KEY_SOURCE})
+
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/netdata.gpg
+ COMMENT "Dearmor ${CMAKE_BINARY_DIR}/netdatabot.gpg.key"
+ DEPENDS ${CMAKE_BINARY_DIR}/netdatabot.gpg.key
+ COMMAND ${GPG} --dearmor --output ${CMAKE_BINARY_DIR}/netdata.gpg ${CMAKE_BINARY_DIR}/netdatabot.gpg.key)
+ add_custom_target(dearmor_gpg_key
+ ALL
+ COMMENT "Dearmor ${CMAKE_BINARY_DIR}/netdatabot.gpg.key"
+ DEPENDS ${CMAKE_BINARY_DIR}/netdata.gpg)
+
+ install(FILES ${CMAKE_BINARY_DIR}/netdata.gpg
+ DESTINATION usr/share/keyrings
+ RENAME netdata-archive-keyring.gpg
+ PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
+ COMPONENT netdata-repo)
+
+ install(FILES ${CMAKE_BINARY_DIR}/netdata.gpg
+ DESTINATION usr/share/keyrings
+ RENAME netdata-archive-keyring.gpg
+ PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
+ COMPONENT netdata-repo-edge)
+
+ set(CPACK_DEB_COMPONENT_INSTALL YES)
+ set(CPACK_DEBIAN_DEBUGINFO_PACKAGE NO)
+ set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS NO)
+ set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS YES)
+ set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
+ set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Netdata Builder <bot@netdata.cloud>")
+ set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "all")
+ set(CPACK_DEBIAN_PACKAGE_DEPENDS "debian-keyring, gnupg")
+ set(CPACK_DEBIAN_PACKAGE_SECTION "net")
+ set(CPACK_DEBIAN_PACKAGE_VERSION "${PACKAGE_VERSION}")
+ set(CPACK_DEBIAN_PACKAGE_RELEASE "${PACKAGE_RELEASE}")
+ set(CPACK_DEBIAN_NETDATA-REPO_PACKAGE_NAME "netdata-repo")
+ set(CPACK_DEBIAN_NETDATA-REPO-EDGE_PACKAGE_NAME "netdata-repo-edge")
+ set(CPACK_DEBIAN_NETDATA-REPO_PACKAGE_CONFLICTS "netdata-repo-edge")
+ set(CPACK_DEBIAN_NETDATA-REPO-EDGE_PACKAGE_CONFLICTS "netdata-repo")
+ set(CPACK_DEBIAN_NETDATA-REPO_PACKAGE_CONTROL_EXTRA "${CMAKE_BINARY_DIR}/netdata-repo/changelog")
+ set(CPACK_DEBIAN_NETDATA-REPO-EDGE_PACKAGE_CONTROL_EXTRA "${CMAKE_BINARY_DIR}/netdata-repo-edge/changelog")
+elseif(${DISTRO} IN_LIST RPM_DISTROS)
+ require_command(RPM rpm)
+
+ if(${RPM} STREQUAL "RPM-NOTFOUND")
+ message(FATAL_ERROR "Unable to find rpm, which is required for RPM package builds.")
+ endif()
+
+ set(REPO_CFG_PATH "yum.repos.d")
+ set(REPO_ID "dnf")
+ set(DIST_NAME "${DISTRO}")
+ set(DIST_VERSION "$releasever")
+
+ if(${DISTRO} STREQUAL "amzn")
+ set(DIST_NAME "amazonlinux")
+ if(${DISTRO_VERSION} VERSION_EQUAL 2)
+ # Nothing to do in this case, defaults work here.
+ elseif(${DISTRO_VERSION} VERSION_EQUAL 2023)
+ set(DIST_VERSION "2023")
+ else()
+ message(FATAL_ERROR "Unsupported version of Amazon Linux: ${DISTRO_VERSION}")
+ endif()
+ elseif(${DISTRO} STREQUAL "opensuse")
+ set(REPO_CFG_PATH "zypp/repos.d")
+ set(REPO_ID "zypp")
+ set(DIST_NAME "opensuse")
+ elseif(${DISTRO} STREQUAL "rhel")
+ set(DIST_NAME "el")
+
+ if(${DISTRO_VERSION} VERSION_LESS_EQUAL 8)
+ set(CPACK_RPM_PACKAGE_REQUIRES "yum-plugin-priorities, epel-release")
+ else()
+ set(CPACK_RPM_PACKAGE_REQUIRES "epel-release")
+ endif()
+ endif()
+
+ message(STATUS "Generating stable repository configuration for ${DISTRO} ${DISTRO_VERSION}")
+ set(VARIANT stable)
+ configure_file(netdata.repo.${REPO_ID} netdata.repo @ONLY)
+ message(STATUS "Generating edge repository configuration for ${DISTRO} ${DISTRO_VERSION}")
+ set(VARIANT edge)
+ configure_file(netdata.repo.${REPO_ID} netdata-edge.repo @ONLY)
+
+ install(FILES ${CMAKE_BINARY_DIR}/netdata.repo
+ COMPONENT netdata-repo
+ DESTINATION etc/${REPO_CFG_PATH})
+ install(FILES ${CMAKE_BINARY_DIR}/netdata-edge.repo
+ COMPONENT netdata-repo-edge
+ DESTINATION etc/${REPO_CFG_PATH})
+
+ set(CPACK_RPM_COMPONENT_INSTALL ON)
+  set(CPACK_RPM_PACKAGE_AUTOREQPROV OFF)
+ set(CPACK_RPM_DEBUGINFO_PACKAGE OFF)
+ set(CPACK_RPM_PACKAGE_LICENSE "GPLv2")
+ set(CPACK_RPM_PACKAGE_GROUP "System Environment/Base")
+ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST "")
+ set(CPACK_RPM_PACKAGE_ARCHITECTURE "noarch")
+ set(CPACK_RPM_PACKAGE_VERSION "${PACKAGE_VERSION}")
+ set(CPACK_RPM_PACKAGE_RELEASE "${PACKAGE_RELEASE}")
+ set(CPACK_RPM_PACKAGE_CHANGELOG "${CMAKE_SOURCE_DIR}/rpm.changelog")
+ set(CPACK_RPM_NETDATA-REPO_FILE_NAME "netdata-repo-${PACKAGE_VERSION}-${PACKAGE_RELEASE}.noarch.rpm")
+ set(CPACK_RPM_NETDATA-REPO_PACKAGE_NAME "netdata-repo")
+  set(CPACK_RPM_NETDATA-REPO_PACKAGE_CONFLICTS "netdata-repo-edge")
+ set(CPACK_RPM_NETDATA-REPO-EDGE_FILE_NAME "netdata-repo-edge-${PACKAGE_VERSION}-${PACKAGE_RELEASE}.noarch.rpm")
+ set(CPACK_RPM_NETDATA-REPO-EDGE_PACKAGE_NAME "netdata-repo-edge")
+ set(CPACK_RPM_NETDATA-REPO-EDGE_PACKAGE_CONFLICTS "netdata-repo")
+else()
+ message(FATAL_ERROR "Unsupported distribution ${DISTRO} ${DISTRO_VERSION}")
+endif()
+
+include(CPack)
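
A minimal sketch of driving this CMakeLists by hand, mirroring what build-deb.sh and build-rpm.sh below do inside their containers (the build directory is illustrative):

  cmake -S packaging/repoconfig -B /tmp/repoconfig-build
  cmake --build /tmp/repoconfig-build
  cd /tmp/repoconfig-build && cpack -G DEB    # use -G RPM on RPM-based distros
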
diff --git a/packaging/repoconfig/Makefile b/packaging/repoconfig/Makefile
deleted file mode 100644
index 18b9887fe..000000000
--- a/packaging/repoconfig/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-FILES = netdata.list netdata-edge.list netdata-archive-keyring.gpg netdata-edge-archive-keyring.gpg netdata-repoconfig-archive-keyring.gpg
-
-all: $(FILES)
-
-netdata.list: netdata.list.in
- cp netdata.list.in netdata.list
- set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__/stable/" netdata.list
-
-netdata-edge.list: netdata.list.in
- cp netdata.list.in netdata-edge.list
- set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__/edge/" netdata-edge.list
-
-netdata.gpg.key:
- curl -L https://repo.netdata.cloud/netdatabot.gpg.key > $@
-
-netdata-archive-keyring.gpg: netdata.gpg.key
- gpg --dearmor > $@ < $<
-
-netdata-edge-archive-keyring.gpg: netdata.gpg.key
- gpg --dearmor > $@ < $<
-
-netdata-repoconfig-archive-keyring.gpg: netdata.gpg.key
- gpg --dearmor > $@ < $<
-
-debian/tmp:
- mkdir -p $@
-
-install: $(FILES) debian/tmp
- cp $(FILES) debian/tmp/
-
-clean:
- rm -f $(FILES)
-
-.PHONY: clean
-.INTERMEDIATE: netdatabot.gpg.key
diff --git a/packaging/repoconfig/build-deb.sh b/packaging/repoconfig/build-deb.sh
index 97f929a68..188d849cf 100755
--- a/packaging/repoconfig/build-deb.sh
+++ b/packaging/repoconfig/build-deb.sh
@@ -1,49 +1,46 @@
#!/bin/sh
-# Extract distro info from /etc/os-release
-DISTVERS="$(awk -F'"' '/VERSION_ID=/ {print $2}' /etc/os-release)"
-DISTNAME="$(awk -F'=' '/^ID=/ {print $2}' /etc/os-release)"
+set -e
+
+SRC_DIR="$(CDPATH='' cd -- "$(dirname -- "${0}")" && pwd -P)"
+BUILD_DIR=/build
+DISTRO="$(awk -F'=' '/^ID=/ {print $2}' /etc/os-release)"
+DISTRO_VERSION="$(awk -F'"' '/VERSION_ID=/ {print $2}' /etc/os-release)"
# Needed because dpkg is stupid and tries to configure things interactively if it sees a terminal.
export DEBIAN_FRONTEND=noninteractive
-# Pull in our dependencies
-apt update || exit 1
-apt upgrade -y || exit 1
-apt install -y build-essential debhelper curl gnupg || exit 1
+echo "::group::Installing Build Dependencies"
+apt update
+apt upgrade -y
+apt install -y --no-install-recommends ca-certificates cmake ninja-build curl gnupg
+echo "::endgroup::"
+
+echo "::group::Building Packages"
+cmake -S "${SRC_DIR}" -B "${BUILD_DIR}" -G Ninja
+cmake --build "${BUILD_DIR}"
-# Run the builds in an isolated source directory.
-# This removes the need for cleanup, and ensures anything the build does
-# doesn't muck with the user's sources.
-cp -a /netdata/packaging/repoconfig /usr/src || exit 1
-cd /usr/src/repoconfig || exit 1
+cd "${BUILD_DIR}"
+cpack -G DEB
+echo "::endgroup::"
-# pre/post options are after 1.18.8, is simpler to just check help for their existence than parsing version
-if dpkg-buildpackage --help | grep "\-\-post\-clean" 2> /dev/null > /dev/null; then
- dpkg-buildpackage --post-clean --pre-clean -b -us -uc || exit 1
-else
- dpkg-buildpackage -b -us -uc || exit 1
-fi
+[ -d "${SRC_DIR}/artifacts" ] || mkdir -p "${SRC_DIR}/artifacts"
# Embed distro info in package name.
-# This is required to make the repo actually standards compliant wthout packageclouds hacks.
-distid="${DISTNAME}${DISTVERS}"
-for pkg in /usr/src/*.deb; do
- pkgname="$(basename "${pkg}" .deb)"
+# This is required to make the repo actually standards compliant without packagecloud's hacks.
+distid="${DISTRO}${DISTRO_VERSION}"
+for pkg in "${BUILD_DIR}"/packages/*.deb; do
+ extension="${pkg##*.}"
+ pkgname="$(basename "${pkg}" "${extension}")"
name="$(echo "${pkgname}" | cut -f 1 -d '_')"
version="$(echo "${pkgname}" | cut -f 2 -d '_')"
arch="$(echo "${pkgname}" | cut -f 3 -d '_')"
- newname="$(dirname "${pkg}")/${name}_${version}+${distid}_${arch}.deb"
+ newname="${SRC_DIR}/artifacts/${name}_${version}+${distid}_${arch}${extension}"
mv "${pkg}" "${newname}"
done
-# Copy the built packages to /netdata/artifacts (which may be bind-mounted)
-# Also ensure /netdata/artifacts exists and create it if it doesn't
-[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts
-cp -a /usr/src/*.deb /netdata/artifacts/ || exit 1
-
# Correct ownership of the artifacts.
# Without this, the artifacts directory and its contents end up owned
# by root instead of the local user on Linux boxes
-chown -R --reference=/netdata /netdata/artifacts
+chown -R --reference="${SRC_DIR}" "${SRC_DIR}/artifacts"
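
With the distro ID embedded, the artifacts directory ends up holding names along the lines of (example values only):

  netdata-repo_3-4+debian12_all.deb
  netdata-repo-edge_3-4+ubuntu24.04_all.deb
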
diff --git a/packaging/repoconfig/build-rpm.sh b/packaging/repoconfig/build-rpm.sh
index 6c07c6619..537b1524f 100755
--- a/packaging/repoconfig/build-rpm.sh
+++ b/packaging/repoconfig/build-rpm.sh
@@ -1,26 +1,46 @@
#!/bin/sh
-prefix='/root/rpmbuild'
+set -e
+SRC_DIR="$(CDPATH='' cd -- "$(dirname -- "${0}")" && pwd -P)"
+BUILD_DIR=/build
+
+echo "::group::Installing Build Dependencies"
if command -v dnf > /dev/null ; then
dnf distro-sync -y --nodocs || exit 1
- dnf install -y --nodocs --setopt=install_weak_deps=False rpm-build || exit 1
+ dnf install -y --nodocs --setopt=install_weak_deps=False rpm-build cmake make || exit 1
elif command -v yum > /dev/null ; then
yum distro-sync -y || exit 1
- yum install -y rpm-build || exit 1
+ yum install -y rpm-build make || exit 1
+ curl --fail -sSL --connect-timeout 20 --retry 3 --output "cmake-linux-$(uname -m).sh" "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-$(uname -m).sh" && \
+ if [ "$(uname -m)" = "x86_64" ]; then \
+ echo '8c449dabb2b2563ec4e6d5e0fb0ae09e729680efab71527b59015131cea4a042 cmake-linux-x86_64.sh' | sha256sum -c - ; \
+ elif [ "$(uname -m)" = "aarch64" ]; then \
+ echo 'a83e01ed1cdf44c2e33e0726513b9a35a8c09e3b5a126fd720b3c8a9d5552368 cmake-linux-aarch64.sh' | sha256sum -c - ; \
+ else \
+ echo "ARCH NOT SUPPORTED BY CMAKE" ; \
+ exit 1 ; \
+ fi && \
+ chmod +x "./cmake-linux-$(uname -m).sh" && \
+ mkdir -p /cmake && \
+ "./cmake-linux-$(uname -m).sh" --skip-license --prefix=/cmake
+ PATH="/cmake/bin:${PATH}"
elif command -v zypper > /dev/null ; then
zypper update -y || exit 1
- zypper install -y rpm-build || exit 1
- prefix="/usr/src/packages"
+ zypper install -y rpm-build cmake make || exit 1
fi
+echo "::endgroup::"
+
+echo "::group::Building Packages"
+cmake -S "${SRC_DIR}" -B "${BUILD_DIR}"
+cmake --build "${BUILD_DIR}"
-mkdir -p "${prefix}/BUILD" "${prefix}/RPMS" "${prefix}/SRPMS" "${prefix}/SPECS" "${prefix}/SOURCES" || exit 1
-cp -a /netdata/packaging/repoconfig/netdata-repo.spec "${prefix}/SPECS" || exit 1
-cp -a /netdata/packaging/repoconfig/* "${prefix}/SOURCES/" || exit 1
+cd "${BUILD_DIR}"
+cpack -G RPM
+echo "::endgroup::"
-rpmbuild -bb --rebuild "${prefix}/SPECS/netdata-repo.spec" || exit 1
+[ -d "${SRC_DIR}/artifacts" ] || mkdir -p "${SRC_DIR}/artifacts"
-[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts
-find "${prefix}/RPMS/" -type f -name '*.rpm' -exec cp '{}' /netdata/artifacts \; || exit 1
+find "${BUILD_DIR}/packages/" -type f -name '*.rpm' -exec cp '{}' "${SRC_DIR}/artifacts" \; || exit 1
-chown -R --reference=/netdata /netdata/artifacts
+chown -R --reference="${SRC_DIR}" "${SRC_DIR}/artifacts"
diff --git a/packaging/repoconfig/deb.changelog b/packaging/repoconfig/deb.changelog
new file mode 100644
index 000000000..6d1dca883
--- /dev/null
+++ b/packaging/repoconfig/deb.changelog
@@ -0,0 +1,49 @@
+@PKG_NAME@ (3-4) unstable; urgency=medium
+
+ * Convert sources to DEB822 format
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 19 Aug 2024 07:49:00 -0400
+
+@PKG_NAME@ (3-3) unstable; urgency=medium
+
+ * Version bump to keep in sync with RPM repo packages
+
+ -- Netdata Builder <bot@netdata.cloud> Fri, 9 Aug 2024 09:37:00 -0400
+
+@PKG_NAME@ (3-2) unstable; urgency=medium
+
+ * Version bump to keep in sync with RPM repo packages
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 24 Jun 2024 07:54:00 -0400
+
+@PKG_NAME@ (3-1) unstable; urgency=medium
+
+ * Migrate to CPack for package builds
+
+ -- Netdata Builder <bot@netdata.cloud> Fri, 14 Jun 2024 08:22:00 -0400
+
+@PKG_NAME@ (2-2) unstable; urgency=medium
+
+ * Version bump to keep in sync with RPM repo packages
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 13 Nov 2023 11:15:00 -0500
+
+@PKG_NAME@ (2-1) unstable; urgency=medium
+
+ * Switched to new package hosting infrastructure
+ * Removed apt-transport-https requirement
+
+ -- Netdata Builder <bot@netdata.cloud> Wed, 18 Jan 2023 08:30:00 -0500
+
+@PKG_NAME@ (1-2) unstable; urgency=medium
+
+ * Fixed package file naming for repo layout compliance
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 6 Jun 2022 09:30:00 -0400
+
+@PKG_NAME@ (1-1) unstable; urgency=medium
+
+ * Initial Release
+
+ -- Netdata Builder <bot@netdata.cloud> Mon, 14 Jun 2021 08:00:00 -0400
+
diff --git a/packaging/repoconfig/debian/changelog b/packaging/repoconfig/debian/changelog
deleted file mode 100644
index d056fa43b..000000000
--- a/packaging/repoconfig/debian/changelog
+++ /dev/null
@@ -1,25 +0,0 @@
-netdata-repo (2-2) unstable; urgency=medium
-
- * Version bump to keep in sync with RPM repo packages
-
- -- Netdata Builder <bot@netdata.cloud> Mon, 13 Nov 2023 11:15:00 -0500
-
-netdata-repo (2-1) unstable; urgency=medium
-
- * Switched to new package hosting infrastructure
- * Removed apt-transport-https requirement
-
- -- Netdata Builder <bot@netdata.cloud> Wed, 18 Jan 2023 08:30:00 -0500
-
-netdata-repo (1-2) unstable; urgency=medium
-
- * Fixed package file naming for repo layout compliance
-
- -- Netdata Builder <bot@netdata.cloud> Mon, 6 Jun 2022 09:30:00 -0500
-
-netdata-repo (1-1) unstable; urgency=medium
-
- * Initial Release
-
- -- Netdata Builder <bot@netdata.cloud> Mon, 14 Jun 2021 08:00:00 -0500
-
diff --git a/packaging/repoconfig/debian/compat b/packaging/repoconfig/debian/compat
deleted file mode 100644
index ec635144f..000000000
--- a/packaging/repoconfig/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/packaging/repoconfig/debian/control b/packaging/repoconfig/debian/control
deleted file mode 100644
index fdea6a829..000000000
--- a/packaging/repoconfig/debian/control
+++ /dev/null
@@ -1,19 +0,0 @@
-Source: netdata-repo
-Section: net
-Priority: optional
-Maintainer: Netdata Builder <bot@netdata.cloud>
-Standards-Version: 3.9.6
-Build-Depends: debhelper (>= 9), curl, gnupg
-Homepage: https://netdata.cloud
-
-Package: netdata-repo
-Architecture: all
-Depends: debian-archive-keyring, gnupg
-Conflicts: netdata-repo-edge
-Description: Configuration for the official Netdata Stable package repository.
-
-Package: netdata-repo-edge
-Architecture:all
-Depends: debian-archive-keyring, gnupg
-Conflicts: netdata-repo
-Description: Configuration for the official Netdata Edge package repository.
diff --git a/packaging/repoconfig/debian/copyright b/packaging/repoconfig/debian/copyright
deleted file mode 100644
index 44b59693d..000000000
--- a/packaging/repoconfig/debian/copyright
+++ /dev/null
@@ -1,10 +0,0 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: Netdata
-Upstream-Contact: Costa Tsaousis <costa@netdata.cloud>
-Source: https://github.com/netdata/netdata
-
-Files: *
-Copyright: 2021-2023 Netdata Inc.
-License: GPL-3+
- On Debian systems, the complete text of the GNU General Public
- License version 3 can be found in /usr/share/common-licenses/GPL-3.
diff --git a/packaging/repoconfig/debian/rules b/packaging/repoconfig/debian/rules
deleted file mode 100755
index 0151b96ea..000000000
--- a/packaging/repoconfig/debian/rules
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/make -f
-
-TOP = $(CURDIR)/debian/netdata-repo
-TOP_EDGE = $(CURDIR)/debian/netdata-repo-edge
-TEMPTOP = $(CURDIR)/debian/tmp
-
-%:
- dh $@
-
-override_dh_configure:
- true
-
-override_dh_install:
- mkdir -p $(TOP)/etc/apt/sources.list.d $(TOP)/etc/apt/trusted.gpg.d/
- mv -f $(TEMPTOP)/netdata.list $(TOP)/etc/apt/sources.list.d
- mv -f $(TEMPTOP)/netdata-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d
- cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d
- mkdir -p $(TOP_EDGE)/etc/apt/sources.list.d $(TOP_EDGE)/etc/apt/trusted.gpg.d/
- mv -f $(TEMPTOP)/netdata-edge.list $(TOP_EDGE)/etc/apt/sources.list.d
- mv -f $(TEMPTOP)/netdata-edge-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d
- cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d
diff --git a/packaging/repoconfig/debian/source/format b/packaging/repoconfig/debian/source/format
deleted file mode 100644
index 163aaf8d8..000000000
--- a/packaging/repoconfig/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/packaging/repoconfig/netdata-edge.repo.al b/packaging/repoconfig/netdata-edge.repo.al
deleted file mode 100644
index 4a300a26e..000000000
--- a/packaging/repoconfig/netdata-edge.repo.al
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata-edge]
-name=Netdata Edge
-baseurl=https://repo.netdata.cloud/repos/edge/amazonlinux/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/amazonlinux/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.centos b/packaging/repoconfig/netdata-edge.repo.centos
deleted file mode 100644
index fd96f0d71..000000000
--- a/packaging/repoconfig/netdata-edge.repo.centos
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata-edge]
-name=Netdata Edge
-baseurl=https://repo.netdata.cloud/repos/edge/el/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/el/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.fedora b/packaging/repoconfig/netdata-edge.repo.fedora
deleted file mode 100644
index 03b0e9c7c..000000000
--- a/packaging/repoconfig/netdata-edge.repo.fedora
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata-edge]
-name=Netdata Edge
-baseurl=https://repo.netdata.cloud/repos/edge/fedora/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/fedora/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.ol b/packaging/repoconfig/netdata-edge.repo.ol
deleted file mode 100644
index 89f74e712..000000000
--- a/packaging/repoconfig/netdata-edge.repo.ol
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata-edge]
-name=Netdata Edge
-baseurl=https://repo.netdata.cloud/repos/edge/ol/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/ol/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata-edge.repo.suse b/packaging/repoconfig/netdata-edge.repo.suse
deleted file mode 100644
index f65bd08d7..000000000
--- a/packaging/repoconfig/netdata-edge.repo.suse
+++ /dev/null
@@ -1,19 +0,0 @@
-[netdata-edge]
-name=Netdata Edge
-baseurl=https://repo.netdata.cloud/repos/edge/opensuse/$releasever/$basearch
-repo_gpgcheck=1
-pkg_gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-type=rpm-md
-autorefresh=1
-
-[netdata-repoconfig]
-name=Netdata Repoconfig
-baseurl=https://repo.netdata.cloud/repos/repoconfig/opensuse/$releasever/$basearch
-repo_gpgcheck=1
-pkg_gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-type=rpm-md
-autorefresh=1
diff --git a/packaging/repoconfig/netdata-repo.spec b/packaging/repoconfig/netdata-repo.spec
deleted file mode 100644
index 242178ba7..000000000
--- a/packaging/repoconfig/netdata-repo.spec
+++ /dev/null
@@ -1,118 +0,0 @@
-%{?rhel:%global centos_ver %rhel}
-
-Name: netdata-repo
-Version: 2
-Release: 2
-Summary: Netdata stable repositories configuration.
-
-Group: System Environment/Base
-License: GPLv2
-
-Source0: netdata.repo.fedora
-Source1: netdata-edge.repo.fedora
-Source2: netdata.repo.suse
-Source3: netdata-edge.repo.suse
-Source4: netdata.repo.centos
-Source5: netdata-edge.repo.centos
-Source6: netdata.repo.ol
-Source7: netdata-edge.repo.ol
-Source8: netdata.repo.al
-Source9: netdata-edge.repo.al
-
-BuildArch: noarch
-
-%if 0%{?centos_ver} && 0%{?centos_ver} < 8
-Requires: yum-plugin-priorities
-%endif
-
-%if 0%{?centos_ver} && 0%{!?amazon_linux:1} && 0%{!?oraclelinux:1}
-Requires: epel-release
-%endif
-
-# Overlapping file installs
-Conflicts: netdata-repo-edge
-
-%description
-This package contains the official Netdata package repository configuration for stable versions of Netdata.
-
-%prep
-%setup -q -c -T
-
-%if 0%{?fedora}
-install -pm 644 %{SOURCE0} ./netdata.repo
-install -pm 644 %{SOURCE1} ./netdata-edge.repo
-%endif
-
-%if 0%{?suse_version}
-install -pm 644 %{SOURCE2} ./netdata.repo
-install -pm 644 %{SOURCE3} ./netdata-edge.repo
-%endif
-
-%if 0%{?centos_ver}
-# Amazon Linux 2 looks like CentOS, but with extra macros.
-%if 0%{?amzn2}
-install -pm 644 %{SOURCE8} ./netdata.repo
-install -pm 644 %{SOURCE9} ./netdata-edge.repo
-%else
-install -pm 644 %{SOURCE4} ./netdata.repo
-install -pm 644 %{SOURCE5} ./netdata-edge.repo
-%endif
-%endif
-
-%if 0%{?oraclelinux}
-install -pm 644 %{SOURCE6} ./netdata.repo
-install -pm 644 %{SOURCE7} ./netdata-edge.repo
-%endif
-
-%build
-true
-
-%install
-rm -rf $RPM_BUILD_ROOT
-
-%if 0%{?suse_version}
-install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
-install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
-install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d
-%else
-install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
-install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
-install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d
-%endif
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files
-%if 0%{?suse_version}
-%attr(644,root,root) /etc/zypp/repos.d/netdata.repo
-%else
-%attr(644,root,root) /etc/yum.repos.d/netdata.repo
-%endif
-
-%package edge
-Summary: Netdata nightly repositories configuration.
-Group: System Environment/Base
-
-# Overlapping file installs
-Conflicts: netdata-repo
-
-%description edge
-This package contains the official Netdata package repository configuration for nightly versions of Netdata.
-
-%files edge
-%if 0%{?suse_version}
-%attr(644,root,root) /etc/zypp/repos.d/netdata-edge.repo
-%else
-%attr(644,root,root) /etc/yum.repos.d/netdata-edge.repo
-%endif
-
-%changelog
-* Mon Nov 13 2023 Austin Hemmelgarn <austin@netdata.cloud> 2-2
-- Add EPEL requirement for RHEL packages.
-* Wed Dec 7 2022 Austin Hemmelgarn <austin@netdata.cloud> 2-1
-- Switch to new hosting at repo.netdata.cloud.
-* Mon Jun 6 2022 Austin Hemmelgarn <austin@netdata.cloud> 1-2
-- Bump release to keep in sync with DEB package.
-* Mon Jun 14 2021 Austin Hemmelgarn <austin@netdata.cloud> 1-1
-- Initial revision
diff --git a/packaging/repoconfig/netdata.list.in b/packaging/repoconfig/netdata.list.in
deleted file mode 100644
index a49dbd91c..000000000
--- a/packaging/repoconfig/netdata.list.in
+++ /dev/null
@@ -1,2 +0,0 @@
-deb http://repo.netdata.cloud/repos/__VARIANT__/__DISTRO__/ __SUITE__/
-deb http://repo.netdata.cloud/repos/repoconfig/__DISTRO__/ __SUITE__/
diff --git a/packaging/repoconfig/netdata.repo.al b/packaging/repoconfig/netdata.repo.al
deleted file mode 100644
index 0bacb3a10..000000000
--- a/packaging/repoconfig/netdata.repo.al
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata]
-name=Netdata
-baseurl=https://repo.netdata.cloud/repos/stable/amazonlinux/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/amazonlinux/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata.repo.centos b/packaging/repoconfig/netdata.repo.centos
deleted file mode 100644
index 221e64513..000000000
--- a/packaging/repoconfig/netdata.repo.centos
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata]
-name=Netdata
-baseurl=https://repo.netdata.cloud/repos/stable/el/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/el/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata.repo.dnf b/packaging/repoconfig/netdata.repo.dnf
new file mode 100644
index 000000000..3a64a2a58
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.dnf
@@ -0,0 +1,19 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+priority=50
+
+[netdata-repoconfig]
+name=Netdata Repository Config
+baseurl=https://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+sslverify=1
+priority=50
diff --git a/packaging/repoconfig/netdata.repo.fedora b/packaging/repoconfig/netdata.repo.fedora
deleted file mode 100644
index e13262acb..000000000
--- a/packaging/repoconfig/netdata.repo.fedora
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata]
-name=Netdata
-baseurl=https://repo.netdata.cloud/repos/stable/fedora/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/fedora/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata.repo.ol b/packaging/repoconfig/netdata.repo.ol
deleted file mode 100644
index 0488670d4..000000000
--- a/packaging/repoconfig/netdata.repo.ol
+++ /dev/null
@@ -1,21 +0,0 @@
-[netdata]
-name=Netdata
-baseurl=https://repo.netdata.cloud/repos/stable/ol/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
-
-[netdata-repoconfig]
-name=Netdata Repository Config
-baseurl=https://repo.netdata.cloud/repos/repoconfig/ol/$releasever/$basearch
-repo_gpgcheck=1
-gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-sslverify=1
-sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-priority=50
diff --git a/packaging/repoconfig/netdata.repo.suse b/packaging/repoconfig/netdata.repo.suse
deleted file mode 100644
index 8204d8d4d..000000000
--- a/packaging/repoconfig/netdata.repo.suse
+++ /dev/null
@@ -1,19 +0,0 @@
-[netdata]
-name=Netdata
-baseurl=https://repo.netdata.cloud/repos/stable/opensuse/$releasever/$basearch
-repo_gpgcheck=1
-pkg_gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-type=rpm-md
-autorefresh=1
-
-[netdata-repoconfig]
-name=Netdata Repoconfig
-baseurl=https://repo.netdata.cloud/repos/repoconfig/opensuse/$releasever/$basearch
-repo_gpgcheck=1
-pkg_gpgcheck=1
-gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
-enabled=1
-type=rpm-md
-autorefresh=1
diff --git a/packaging/repoconfig/netdata.repo.zypp b/packaging/repoconfig/netdata.repo.zypp
new file mode 100644
index 000000000..9ab847343
--- /dev/null
+++ b/packaging/repoconfig/netdata.repo.zypp
@@ -0,0 +1,19 @@
+[netdata]
+name=Netdata
+baseurl=https://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
+
+[netdata-repoconfig]
+name=Netdata Repoconfig
+baseurl=https://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch
+repo_gpgcheck=1
+pkg_gpgcheck=1
+gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key
+enabled=1
+type=rpm-md
+autorefresh=1
diff --git a/packaging/repoconfig/netdata.sources.in b/packaging/repoconfig/netdata.sources.in
new file mode 100644
index 000000000..926b2c453
--- /dev/null
+++ b/packaging/repoconfig/netdata.sources.in
@@ -0,0 +1,15 @@
+X-Repolib-Name: Netdata @VARIANT@ repository
+Types: deb
+URIs: http://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/
+Suites: @SUITE@/
+Signed-By: /usr/share/keyrings/netdata-archive-keyring.gpg
+By-Hash: No
+Enabled: Yes
+
+X-Repolib-Name: Netdata repository configuration repository
+Types: deb
+URIs: http://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/
+Suites: @SUITE@/
+Signed-By: /usr/share/keyrings/netdata-archive-keyring.gpg
+By-Hash: No
+Enabled: Yes
diff --git a/packaging/repoconfig/rpm.changelog b/packaging/repoconfig/rpm.changelog
new file mode 100644
index 000000000..dab81a2cf
--- /dev/null
+++ b/packaging/repoconfig/rpm.changelog
@@ -0,0 +1,18 @@
+* Mon Aug 19 2024 Austin Hemmelgarn <austin@netdata.cloud>
+- Version bump to stay in sync with DEB packages.
+* Fri Aug 9 2024 Austin Hemmelgarn <austin@netdata.cloud> 3-3
+- Use system certificate config for Yum/DNF repos.
+* Mon Jun 24 2024 Austin Hemmelgarn <austin@netdata.cloud> 3-2
+- Fix package file names.
+* Fri Jun 14 2024 Austin Hemmelgarn <austin@netdata.cloud> 3-1
+- Migrate package build infrastructure to CPack.
+* Wed Apr 10 2024 Paul Szymanski <mail@pszy.de> 2-3
+- Fix repo specification for Amazon Linux 2023.
+* Mon Nov 13 2023 Austin Hemmelgarn <austin@netdata.cloud> 2-2
+- Add EPEL requirement for RHEL packages.
+* Wed Dec 7 2022 Austin Hemmelgarn <austin@netdata.cloud> 2-1
+- Switch to new hosting at repo.netdata.cloud.
+* Mon Jun 6 2022 Austin Hemmelgarn <austin@netdata.cloud> 1-2
+- Bump release to keep in sync with DEB package.
+* Mon Jun 14 2021 Austin Hemmelgarn <austin@netdata.cloud> 1-1
+- Initial revision
diff --git a/packaging/runtime-check.sh b/packaging/runtime-check.sh
new file mode 100755
index 000000000..969600f00
--- /dev/null
+++ b/packaging/runtime-check.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+
+wait_for() {
+ host="${1}"
+ port="${2}"
+ name="${3}"
+ timeout="30"
+
+ if command -v nc > /dev/null ; then
+ netcat="nc"
+ elif command -v netcat > /dev/null ; then
+ netcat="netcat"
+ else
+ printf "Unable to find a usable netcat command.\n"
+ return 1
+ fi
+
+ printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}"
+
+ sleep 30
+
+ i=0
+ while ! ${netcat} -z "${host}" "${port}"; do
+ sleep 1
+ if [ "$i" -gt "$timeout" ]; then
+ printf "Timed out!\n"
+ return 2
+ fi
+ i="$((i + 1))"
+ done
+ printf "OK\n"
+}
+
+wait_for localhost 19999 netdata
+
+case $? in
+ 1) exit 2 ;;
+ 2) exit 3 ;;
+esac
+
+curl -sfS http://127.0.0.1:19999/api/v1/info > ./response || exit 1
+
+cat ./response
+
+jq '.version' ./response || exit 1
+
+curl -sfS http://127.0.0.1:19999/index.html || exit 1
+curl -sfS http://127.0.0.1:19999/v0/index.html || exit 1
+curl -sfS http://127.0.0.1:19999/v1/index.html || exit 1
+curl -sfS http://127.0.0.1:19999/v2/index.html || exit 1
+
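+# Libexec components the package is expected to ship. Any entry matching the
+# NETDATA_SKIP_LIBEXEC_PARTS regex (if set) is skipped by the check below.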
+NETDATA_LIBEXEC_PARTS="
+plugins.d/apps.plugin
+plugins.d/cgroup-network
+plugins.d/charts.d.plugin
+plugins.d/cups.plugin
+plugins.d/debugfs.plugin
+plugins.d/ebpf.plugin
+plugins.d/freeipmi.plugin
+plugins.d/go.d.plugin
+plugins.d/ioping.plugin
+plugins.d/local-listeners
+plugins.d/ndsudo
+plugins.d/network-viewer.plugin
+plugins.d/nfacct.plugin
+plugins.d/perf.plugin
+plugins.d/python.d.plugin
+plugins.d/slabinfo.plugin
+plugins.d/xenstat.plugin
+"
+
+if [ -d "${NETDATA_LIBEXEC_PREFIX}" ]; then
+ success=1
+ for part in ${NETDATA_LIBEXEC_PARTS}; do
+ # shellcheck disable=SC2254
+ if echo "${part}" | grep -qE "${NETDATA_SKIP_LIBEXEC_PARTS}"; then
+ continue
+ fi
+
+ if [ ! -x "${NETDATA_LIBEXEC_PREFIX}/${part}" ]; then
+ success=0
+ echo "!!! ${NETDATA_LIBEXEC_PREFIX}/${part} is missing"
+ fi
+ done
+
+ if [ "${success}" -eq 0 ]; then
+ exit 1
+ fi
+fi
diff --git a/packaging/utils/coverity-scan.sh b/packaging/utils/coverity-scan.sh
new file mode 100755
index 000000000..ebb82102d
--- /dev/null
+++ b/packaging/utils/coverity-scan.sh
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+#
+# Coverity scan script
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Costa Tsaousis (costa@netdata.cloud)
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+# shellcheck disable=SC1091,SC2230,SC2086
+
+# To run manually, save configuration to .coverity-scan.conf like this:
+#
+# the repository to report to coverity - devs can set here their own fork
+# REPOSITORY="netdata/netdata"
+#
+# the email of the developer, as given to coverity
+# COVERITY_SCAN_SUBMIT_MAIL="you@example.com"
+#
+# the token given by coverity to the developer
+# COVERITY_SCAN_TOKEN="TOKEN taken from Coverity site"
+#
+# the absolute path of the cov-build - optional
+# COVERITY_BUILD_PATH="/opt/cov-analysis-linux64-2021.12/bin/cov-build"
+#
+# when set, the script will print on screen the curl command that submits the build to coverity
+# this includes the token, so the default is not to print it.
+# COVERITY_SUBMIT_DEBUG=1
+#
+# All these variables can also be exported before running this script.
+#
+# If this script is passed the "--with-install" option,
+# the coverity build tools will be downloaded and installed under /opt
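+#
+# A minimal usage sketch (paths shown relative to the repository root):
+#
+#   packaging/utils/coverity-scan.sh --with-install   # download and install the Coverity tools
+#   packaging/utils/coverity-scan.sh                  # run the analysis and submit it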
+
+set -e
+
+if [ "$(uname -s)" != "Linux" ] || [ "$(uname -m)" != "x86_64" ]; then
+ echo "This script can only be used on a 64-bit x86 Linux system."
+ exit 1
+fi
+
+INSTALL_DIR="/opt"
+
+SCRIPT_SOURCE="$(
+ self=${0}
+ while [ -L "${self}" ]
+ do
+ cd "${self%/*}" || exit 1
+ self=$(readlink "${self}")
+ done
+ cd "${self%/*}" || exit 1
+ echo "$(pwd -P)/${self##*/}"
+)"
+REPO_ROOT="$(dirname "${SCRIPT_SOURCE}")/../.."
+
+. "${REPO_ROOT}/packaging/installer/functions.sh"
+
+JOBS=$(find_processors)
+[ -z "${JOBS}" ] && JOBS=1
+
+if command -v ninja > /dev/null 2>&1; then
+ ninja="$(command -v ninja)"
+fi
+
+CMAKE_OPTS="${ninja:+-G Ninja}"
+BUILD_OPTS="VERBOSE=1"
+[ -n "${ninja}" ] && BUILD_OPTS="-v"
+NETDATA_BUILD_DIR="${NETDATA_BUILD_DIR:-./build/}"
+
+if [ -f ".coverity-scan.conf" ]; then
+ source ".coverity-scan.conf"
+fi
+
+repo="${REPOSITORY}"
+if [ -z "${repo}" ]; then
+ fatal "export variable REPOSITORY or set it in .coverity-scan.conf"
+fi
+repo="${repo//\//%2F}"
+
+email="${COVERITY_SCAN_SUBMIT_MAIL}"
+if [ -z "${email}" ]; then
+ fatal "export variable COVERITY_SCAN_SUBMIT_MAIL or set it in .coverity-scan.conf"
+fi
+
+token="${COVERITY_SCAN_TOKEN}"
+if [ -z "${token}" ]; then
+ fatal "export variable COVERITY_SCAN_TOKEN or set it in .coverity-scan.conf"
+fi
+
+if ! command -v curl > /dev/null 2>&1; then
+ fatal "CURL is required for coverity scan to work"
+fi
+
+# echo the full command (via run) only when debugging is enabled;
+# otherwise run it silently, so the Coverity token is not printed
+debugrun() {
+ if [ "${COVERITY_SUBMIT_DEBUG}" = "1" ]; then
+ run "${@}"
+ return $?
+ else
+ "${@}"
+ return $?
+ fi
+}
+
+scanit() {
+ progress "Scanning using coverity"
+ COVERITY_PATH=$(find "${INSTALL_DIR}" -maxdepth 1 -name 'cov*linux*')
+ export PATH=${PATH}:${COVERITY_PATH}/bin/
+ covbuild="${COVERITY_BUILD_PATH}"
+ [ -z "${covbuild}" ] && covbuild="$(which cov-build 2> /dev/null || command -v cov-build 2> /dev/null)"
+
+ if [ -z "${covbuild}" ]; then
+ fatal "Cannot find 'cov-build' binary in \$PATH. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ elif [ ! -x "${covbuild}" ]; then
+ fatal "The command '${covbuild}' is not executable. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ fi
+
+ cd "${REPO_ROOT}" || exit 1
+
+ version="$(grep "^#define PACKAGE_VERSION" config.h | cut -d '"' -f 2)"
+ progress "Working on netdata version: ${version}"
+
+ progress "Cleaning up old builds..."
+ rm -rf "${NETDATA_BUILD_DIR}"
+
+ [ -d "cov-int" ] && rm -rf "cov-int"
+
+ [ -f netdata-coverity-analysis.tgz ] && run rm netdata-coverity-analysis.tgz
+
+ progress "Configuring netdata source..."
+ USE_SYSTEM_PROTOBUF=1
+ ENABLE_GO=0
+ prepare_cmake_options
+
+ run cmake ${NETDATA_CMAKE_OPTIONS}
+
+ progress "Analyzing netdata..."
+ run "${covbuild}" --dir cov-int cmake --build "${NETDATA_BUILD_DIR}" --parallel ${JOBS} -- ${BUILD_OPTS}
+
+ echo >&2 "Compressing analysis..."
+ run tar czvf netdata-coverity-analysis.tgz cov-int
+
+ echo >&2 "Sending analysis to coverity for netdata version ${version} ..."
+ COVERITY_SUBMIT_RESULT=$(debugrun curl --progress-bar \
+ --form token="${token}" \
+ --form email="${email}" \
+ --form file=@netdata-coverity-analysis.tgz \
+ --form version="${version}" \
+ --form description="netdata, monitor everything, in real-time." \
+ https://scan.coverity.com/builds?project="${repo}")
+
+ echo "${COVERITY_SUBMIT_RESULT}" | grep -q -e 'Build successfully submitted' || echo >&2 "scan results were not pushed to coverity. Message was: ${COVERITY_SUBMIT_RESULT}"
+
+ progress "Coverity scan completed"
+}
+
+installit() {
+ TMP_DIR="$(mktemp -d /tmp/netdata-coverity-scan-XXXXX)"
+ progress "Downloading coverity in ${TMP_DIR}..."
+ (cd "${TMP_DIR}" && debugrun curl --remote-name --remote-header-name --show-error --location --data "token=${token}&project=${repo}" https://scan.coverity.com/download/linux64)
+
+ COVERITY_ARCHIVE="$(find "${TMP_DIR}" -maxdepth 1 -mindepth 1 -name 'cov-analysis-linux64-*.tar.gz')"
+
+ if [ -n "${COVERITY_ARCHIVE}" ] && [ -f "${COVERITY_ARCHIVE}" ]; then
+ progress "Installing coverity..."
+ run sudo tar -z -x -f "${COVERITY_ARCHIVE}" -C "${INSTALL_DIR}"
+ rm -f "${COVERITY_ARCHIVE}"
+ COVERITY_PATH=$(find "${INSTALL_DIR}" -maxdepth 1 -name 'cov*linux*')
+ export PATH="${PATH}:${COVERITY_PATH}/bin/"
+  elif [ -n "$(find "${TMP_DIR}" -name "*.tar.gz" 2> /dev/null)" ]; then
+ ls "${TMP_DIR}"/*.tar.gz
+ fatal "Downloaded coverity tool tarball does not appear to be the file-name we were expecting, exiting."
+ else
+ fatal "Failed to download coverity tool tarball!"
+ fi
+
+ # Validate the installation
+ covbuild="$(which cov-build 2> /dev/null || command -v cov-build 2> /dev/null)"
+ if [ -z "$covbuild" ]; then
+ fatal "Failed to install coverity."
+ fi
+
+ progress "Coverity scan tools are installed."
+
+ # Clean temp directory
+ [ -n "${TMP_DIR}" ] && rm -rf "${TMP_DIR}"
+ return 0
+}
+
+FOUND_OPTS="NO"
+while [ -n "${1}" ]; do
+ if [ "${1}" = "--with-install" ]; then
+ progress "Running coverity install"
+ installit
+ shift 1
+ elif [ -n "${1}" ]; then
+ # Clear the default arguments, once you bump into the first argument
+ if [ "${FOUND_OPTS}" = "NO" ]; then
+ OTHER_OPTIONS="${1}"
+ FOUND_OPTS="YES"
+ else
+ OTHER_OPTIONS+=" ${1}"
+ fi
+
+ shift 1
+ else
+ break
+ fi
+done
+
+echo "Running coverity scan with extra options ${OTHER_OPTIONS}"
+scanit "${OTHER_OPTIONS}"
diff --git a/packaging/utils/find-dll-deps.sh b/packaging/utils/find-dll-deps.sh
new file mode 100644
index 000000000..9f4fe3847
--- /dev/null
+++ b/packaging/utils/find-dll-deps.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
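+# Print the sorted, de-duplicated list of /usr/bin libraries that the given
+# binaries link against, as reported by ldd.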
+if [ "$#" -lt 1 ]; then
+ echo "Usage: $0 <command1> <command2> ... <commandN>"
+ exit 1
+fi
+
+results=()
+
+for arg in "$@"; do
+ while IFS= read -r line; do
+ results+=("$line")
+ done < <(ldd "$arg" | grep /usr/bin | awk '{ print $3 }')
+done
+
+printf "%s\n" "${results[@]}" | sort | uniq
diff --git a/packaging/version b/packaging/version
index 0ade2bb9e..7fa3401a1 100644
--- a/packaging/version
+++ b/packaging/version
@@ -1 +1 @@
-v1.44.3
+v1.47.2
diff --git a/packaging/windows/NetdataWhite.ico b/packaging/windows/NetdataWhite.ico
new file mode 100644
index 000000000..857b88062
--- /dev/null
+++ b/packaging/windows/NetdataWhite.ico
Binary files differ
diff --git a/packaging/windows/bash_execute.sh b/packaging/windows/bash_execute.sh
new file mode 100755
index 000000000..4092db966
--- /dev/null
+++ b/packaging/windows/bash_execute.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/bash
+
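+# Rewrite any Windows-style C:\ or C:/ path prefixes in the arguments to their
+# MSYS-style /c/ equivalents, then execute the resulting command line.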
+convert_path() {
+ local ARG="$1"
+ ARG="${ARG//C:\\//c/}"
+ ARG="${ARG//c:\\//c/}"
+ ARG="${ARG//C:\///c/}"
+ ARG="${ARG//c:\///c/}"
+
+ echo "$ARG"
+}
+
+declare params=()
+for x in "${@}"
+do
+ params+=("$(convert_path "${x}")")
+done
+
+"${params[@]}"
diff --git a/packaging/windows/build.ps1 b/packaging/windows/build.ps1
new file mode 100644
index 000000000..f656ed568
--- /dev/null
+++ b/packaging/windows/build.ps1
@@ -0,0 +1,16 @@
+# Run the build
+
+#Requires -Version 4.0
+
+$ErrorActionPreference = "Stop"
+
+. "$PSScriptRoot\functions.ps1"
+
+$msysbash = Get-MSYS2Bash "$msysprefix"
+$env:CHERE_INVOKING = 'yes'
+
+& $msysbash -l "$PSScriptRoot\compile-on-windows.sh"
+
+if ($LastExitcode -ne 0) {
+ exit 1
+}
diff --git a/packaging/windows/clion-msys-mingw64-environment.bat b/packaging/windows/clion-msys-mingw64-environment.bat
new file mode 100644
index 000000000..19035d8eb
--- /dev/null
+++ b/packaging/windows/clion-msys-mingw64-environment.bat
@@ -0,0 +1,17 @@
+@echo off
+:: In Clion Toolchains
+:: 1. Add a MinGW profile
+:: 2. Set Toolset to C:\msys64\mingw64
+:: 3. Add environment and set the full path to this file, like:
+::    C:\msys64\home\costa\src\netdata-ktsaou.git\packaging\windows\clion-msys-mingw64-environment.bat
+:: 4. Leave everything else set to Bundled and auto-detected
+::
+set "batch_dir=%~dp0"
+set "batch_dir=%batch_dir:\=/%"
+set MSYSTEM=MINGW64
+set GOROOT=C:\msys64\mingw64
+set PATH="%PATH%;C:\msys64\mingw64\bin;C:\msys64\usr\bin;C:\msys64\bin"
+::set PKG_CONFIG_EXECUTABLE=C:\msys64\mingw64\bin\pkg-config.exe
+::set CMAKE_C_COMPILER=C:\msys64\mingw64\bin\gcc.exe
+::set CMAKE_CC_COMPILER=C:\msys64\mingw64\bin\g++.exe
+set PROTOBUF_PROTOC_EXECUTABLE=C:/msys64/mingw64/bin/protoc.exe
diff --git a/packaging/windows/clion-msys-msys-environment.bat b/packaging/windows/clion-msys-msys-environment.bat
new file mode 100644
index 000000000..9f0c095d3
--- /dev/null
+++ b/packaging/windows/clion-msys-msys-environment.bat
@@ -0,0 +1,20 @@
+@echo off
+:: In Clion Toolchains
+:: 1. Add a MinGW profile
+:: 2. Set Toolset to C:\msys64\mingw64
+:: 3. Add environment and set the full path to this file, like:
+::    C:\msys64\home\costa\src\netdata-ktsaou.git\packaging\windows\clion-msys-msys-environment.bat
+:: 4. Leave everything else set to Bundled and auto-detected
+::
+set "batch_dir=%~dp0"
+set "batch_dir=%batch_dir:\=/%"
+set MSYSTEM=MSYS
+
+:: go exists only under the mingw64 / ucrt64 / etc. profiles, not under the msys profile
+set GOROOT=C:\msys64\mingw64
+
+set PATH="%PATH%;C:\msys64\usr\bin;C:\msys64\bin;C:\msys64\mingw64\bin"
+::set PKG_CONFIG_EXECUTABLE=C:\msys64\mingw64\bin\pkg-config.exe
+::set CMAKE_C_COMPILER=C:\msys64\mingw64\bin\gcc.exe
+::set CMAKE_CC_COMPILER=C:\msys64\mingw64\bin\g++.exe
+set PROTOBUF_PROTOC_EXECUTABLE=%batch_dir%/protoc.bat
diff --git a/packaging/windows/compile-on-windows.sh b/packaging/windows/compile-on-windows.sh
new file mode 100755
index 000000000..ceb4f5502
--- /dev/null
+++ b/packaging/windows/compile-on-windows.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+REPO_ROOT="$(dirname "$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd -P)")")"
+CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE:-RelWithDebInfo}"
+
+# shellcheck source=./win-build-dir.sh
+. "${REPO_ROOT}/packaging/windows/win-build-dir.sh"
+
+set -exu -o pipefail
+
+if [ -d "${build}" ]; then
+ rm -rf "${build}"
+fi
+
+generator="Unix Makefiles"
+build_args="-j $(nproc)"
+
+if command -v ninja >/dev/null 2>&1; then
+ generator="Ninja"
+ build_args="-k 1"
+fi
+
+COMMON_CFLAGS="-Wa,-mbig-obj -pipe -D_FILE_OFFSET_BITS=64 -D__USE_MINGW_ANSI_STDIO=1"
+
+if [ "${CMAKE_BUILD_TYPE}" = "Debug" ]; then
+ BUILD_CFLAGS="-fstack-protector-all -O0 -ggdb -Wall -Wextra -Wno-char-subscripts -DNETDATA_INTERNAL_CHECKS=1 ${COMMON_CFLAGS} ${CFLAGS:-}"
+else
+ BUILD_CFLAGS="-O2 ${COMMON_CFLAGS} ${CFLAGS:-}"
+fi
+
+${GITHUB_ACTIONS+echo "::group::Configuring"}
+# shellcheck disable=SC2086
+CFLAGS="${BUILD_CFLAGS}" /usr/bin/cmake \
+ -S "${REPO_ROOT}" \
+ -B "${build}" \
+ -G "${generator}" \
+ -DCMAKE_INSTALL_PREFIX="/opt/netdata" \
+ -DBUILD_FOR_PACKAGING=On \
+ -DNETDATA_USER="${USER}" \
+ -DENABLE_ACLK=On \
+ -DENABLE_CLOUD=On \
+ -DENABLE_H2O=Off \
+ -DENABLE_ML=On \
+ -DENABLE_PLUGIN_GO=On \
+ -DENABLE_EXPORTER_PROMETHEUS_REMOTE_WRITE=Off \
+ -DENABLE_BUNDLED_JSONC=On \
+ -DENABLE_BUNDLED_PROTOBUF=Off \
+ ${EXTRA_CMAKE_OPTIONS:-}
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+${GITHUB_ACTIONS+echo "::group::Building"}
+# shellcheck disable=SC2086
+cmake --build "${build}" -- ${build_args}
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+if [ -t 1 ]; then
+ echo
+ echo "Compile with:"
+ echo "cmake --build \"${build}\""
+fi
diff --git a/packaging/windows/fetch-msys2-installer.py b/packaging/windows/fetch-msys2-installer.py
new file mode 100755
index 000000000..e30e7205c
--- /dev/null
+++ b/packaging/windows/fetch-msys2-installer.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+'''Fetch the MSYS2 installer.'''
+
+from __future__ import annotations
+
+import hashlib
+import json
+import shutil
+import sys
+
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from typing import Final
+from urllib.request import Request, urlopen
+
+REPO: Final = 'msys2/msys2-installer'
+
+
+def get_latest_release() -> tuple[str, str]:
+ '''Get the latest release for the repo.'''
+ REQUEST: Final = Request(
+ url=f'https://api.github.com/repos/{REPO}/releases',
+ headers={
+ 'Accept': 'application/vnd.github+json',
+ 'X-GitHub-API-Version': '2022-11-28',
+ },
+ method='GET',
+ )
+
+ print('>>> Fetching release list')
+
+ with urlopen(REQUEST, timeout=15) as response:
+ if response.status != 200:
+ print(f'!!! Failed to fetch release list, status={response.status}')
+ sys.exit(1)
+
+ data = json.load(response)
+
+ data = list(filter(lambda x: x['name'] != 'Nightly Installer Build', data))
+
+ name = data[0]['name']
+ version = data[0]['tag_name'].replace('-', '')
+
+ return name, version
+
+
+def fetch_release_asset(tmpdir: Path, name: str, file: str) -> Path:
+ '''Fetch a specific release asset.'''
+ REQUEST: Final = Request(
+ url=f'https://github.com/{REPO}/releases/download/{name}/{file}',
+ method='GET',
+ )
+ TARGET: Final = tmpdir / file
+
+ print(f'>>> Downloading {file}')
+
+ with urlopen(REQUEST, timeout=15) as response:
+ if response.status != 200:
+ print(f'!!! Failed to fetch {file}, status={response.status}')
+ sys.exit(1)
+
+ TARGET.write_bytes(response.read())
+
+ return TARGET
+
+
+def main() -> None:
+ '''Core program logic.'''
+ if len(sys.argv) != 2:
+        print(f'{__file__} must be run with exactly one argument.')
+        sys.exit(1)
+
+ target = Path(sys.argv[1])
+ tmp_target = target.with_name(f'.{target.name}.tmp')
+
+ name, version = get_latest_release()
+
+ with TemporaryDirectory() as tmpdir:
+ tmppath = Path(tmpdir)
+
+ installer = fetch_release_asset(tmppath, name, f'msys2-base-x86_64-{version}.tar.zst')
+ checksums = fetch_release_asset(tmppath, name, f'msys2-base-x86_64-{version}.tar.zst.sha256')
+
+ print('>>> Verifying SHA256 checksum')
+ expected_checksum = checksums.read_text().partition(' ')[0].casefold()
+ actual_checksum = hashlib.sha256(installer.read_bytes()).hexdigest().casefold()
+
+ if expected_checksum != actual_checksum:
+ print('!!! Checksum mismatch')
+ print(f'!!! Expected: {expected_checksum}')
+ print(f'!!! Actual: {actual_checksum}')
+ sys.exit(1)
+
+ print(f'>>> Copying to {target}')
+
+ shutil.copy(installer, tmp_target)
+ tmp_target.replace(target)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/packaging/windows/functions.ps1 b/packaging/windows/functions.ps1
new file mode 100644
index 000000000..a5f032daa
--- /dev/null
+++ b/packaging/windows/functions.ps1
@@ -0,0 +1,31 @@
+# Functions used by the PowerShell scripts in this directory.
+
+#Requires -Version 4.0
+
+function Get-MSYS2Prefix {
+ if (-Not ($msysprefix)) {
+ if (Test-Path -Path C:\msys64\usr\bin\bash.exe) {
+ return "C:\msys64"
+ } elseif ($env:ChocolateyToolsLocation) {
+ if (Test-Path -Path "$env:ChocolateyToolsLocation\msys64\usr\bin\bash.exe") {
+ Write-Host "Found MSYS2 installed via Chocolatey"
+ Write-Host "This will work for building Netdata, but not for packaging it"
+ return "$env:ChocolateyToolsLocation\msys64"
+ }
+ }
+ }
+
+ return ""
+}
+
+function Get-MSYS2Bash {
+ $msysprefix = $args[0]
+
+ if (-Not ($msysprefix)) {
+ $msysprefix = Get-MSYS2Prefix
+ }
+
+ Write-Host "Using MSYS2 from $msysprefix"
+
+ return "$msysprefix\usr\bin\bash.exe"
+}
diff --git a/packaging/windows/install-dependencies.ps1 b/packaging/windows/install-dependencies.ps1
new file mode 100644
index 000000000..66ec73160
--- /dev/null
+++ b/packaging/windows/install-dependencies.ps1
@@ -0,0 +1,84 @@
+# Set up Windows build dependencies.
+#
+# This script first checks whether MSYS2 is already installed (including a Chocolatey-managed install). If so, it just uses it. If not, it downloads and installs the official MSYS2 installer from GitHub.
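+#
+# A typical invocation, from a PowerShell prompt at the repository root (one
+# possible way to run it, not the only one):
+#
+#   .\packaging\windows\install-dependencies.ps1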
+
+#Requires -Version 4.0
+
+$ErrorActionPreference = "Stop"
+
+. "$PSScriptRoot\functions.ps1"
+
+$msysprefix = Get-MSYS2Prefix
+
+function Check-FileHash {
+ $file_path = $args[0]
+
+ Write-Host "Checking SHA256 hash of $file_path"
+
+ $actual_hash = (Get-FileHash -Algorithm SHA256 -Path $file_path).Hash.toLower()
+ $expected_hash = (Get-Content "$file_path.sha256").split()[0]
+
+ if ($actual_hash -ne $expected_hash) {
+ Write-Host "SHA256 hash mismatch!"
+ Write-Host "Expected: $expected_hash"
+ Write-Host "Actual: $actual_hash"
+ exit 1
+ }
+}
+
+function Install-MSYS2 {
+ $repo = 'msys2/msys2-installer'
+ $uri = "https://api.github.com/repos/$repo/releases"
+ $headers = @{
+ 'Accept' = 'application/vnd.github+json'
+ 'X-GitHub-API-Version' = '2022-11-28'
+ }
+ $installer_path = "$env:TEMP\msys2-base.exe"
+
+ if ($env:PROCESSOR_ARCHITECTURE -ne "AMD64") {
+ Write-Host "We can only install MSYS2 for 64-bit x86 systems, but you appear to have a different processor architecture ($env:PROCESSOR_ARCHITECTURE)."
+ Write-Host "You will need to install MSYS2 yourself instead."
+ exit 1
+ }
+
+ Write-Host "Determining latest release"
+ $release_list = Invoke-RestMethod -Uri $uri -Headers $headers -TimeoutSec 30
+
+ $release = $release_list[0]
+ $release_name = $release.name
+ $version = $release.tag_name.Replace('-', '')
+ $installer_url = "https://github.com/$repo/releases/download/$release_name/msys2-x86_64-$version.exe"
+
+ Write-Host "Fetching $installer_url"
+ Invoke-WebRequest $installer_url -OutFile $installer_path
+ Write-Host "Fetching $installer_url.sha256"
+ Invoke-WebRequest "$installer_url.sha256" -OutFile "$installer_path.sha256"
+
+ Write-Host "Checking file hash"
+ Check-FileHash $installer_path
+
+ Write-Host "Installing"
+ & $installer_path in --confirm-command --accept-messages --root C:/msys64
+
+ return "C:\msys64"
+}
+
+if (-Not ($msysprefix)) {
+ Write-Host "Could not find MSYS2, attempting to install it"
+ $msysprefix = Install-MSYS2
+}
+
+$msysbash = Get-MSYS2Bash "$msysprefix"
+$env:CHERE_INVOKING = 'yes'
+
+& $msysbash -l "$PSScriptRoot\msys2-dependencies.sh"
+
+if ($LastExitcode -ne 0) {
+    Write-Host "First update attempt failed. This is expected if the msys-runtime package needed to be updated; trying again."
+
+ & $msysbash -l "$PSScriptRoot\msys2-dependencies.sh"
+
+ if ($LastExitcode -ne 0) {
+ exit 1
+ }
+}
diff --git a/packaging/windows/installer.nsi b/packaging/windows/installer.nsi
new file mode 100644
index 000000000..88d160a1d
--- /dev/null
+++ b/packaging/windows/installer.nsi
@@ -0,0 +1,186 @@
+!include "MUI2.nsh"
+!include "nsDialogs.nsh"
+!include "FileFunc.nsh"
+
+Name "Netdata"
+Outfile "netdata-installer.exe"
+InstallDir "$PROGRAMFILES\Netdata"
+RequestExecutionLevel admin
+
+!define MUI_ICON "NetdataWhite.ico"
+!define MUI_UNICON "NetdataWhite.ico"
+
+!define ND_UININSTALL_REG "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata"
+
+!define MUI_ABORTWARNING
+!define MUI_UNABORTWARNING
+
+!insertmacro MUI_PAGE_WELCOME
+!insertmacro MUI_PAGE_LICENSE "C:\msys64\cloud.txt"
+!insertmacro MUI_PAGE_LICENSE "C:\msys64\gpl-3.0.txt"
+!insertmacro MUI_PAGE_DIRECTORY
+!insertmacro MUI_PAGE_INSTFILES
+Page Custom NetdataConfigPage NetdataConfigLeave
+!insertmacro MUI_PAGE_FINISH
+
+!insertmacro MUI_UNPAGE_CONFIRM
+!insertmacro MUI_UNPAGE_INSTFILES
+!insertmacro MUI_UNPAGE_FINISH
+
+!insertmacro MUI_LANGUAGE "English"
+
+var hStartMsys
+var startMsys
+
+var hCloudToken
+var cloudToken
+var hCloudRoom
+var cloudRoom
+
+Function .onInit
+ nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata'
+ pop $0
+ ${If} $0 == 0
+ nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata'
+ pop $0
+ ${EndIf}
+
+ StrCpy $startMsys ${BST_UNCHECKED}
+FunctionEnd
+
+Function NetdataConfigPage
+ !insertmacro MUI_HEADER_TEXT "Netdata configuration" "Claim your agent on Netdata Cloud"
+
+ nsDialogs::Create 1018
+ Pop $0
+ ${If} $0 == error
+ Abort
+ ${EndIf}
+
+ ${NSD_CreateLabel} 0 0 100% 12u "Enter your Token and Cloud Room."
+ ${NSD_CreateLabel} 0 15% 100% 12u "Optionally, you can open a terminal to execute additional commands."
+
+ ${NSD_CreateLabel} 0 35% 20% 10% "Token"
+ Pop $0
+ ${NSD_CreateText} 21% 35% 79% 10% ""
+ Pop $hCloudToken
+
+ ${NSD_CreateLabel} 0 55% 20% 10% "Room"
+ Pop $0
+ ${NSD_CreateText} 21% 55% 79% 10% ""
+ Pop $hCloudRoom
+
+ ${NSD_CreateCheckbox} 0 70% 100% 10u "Open terminal"
+ Pop $hStartMsys
+ nsDialogs::Show
+FunctionEnd
+
+Function NetdataConfigLeave
+ ${NSD_GetText} $hCloudToken $cloudToken
+ ${NSD_GetText} $hCloudRoom $cloudRoom
+ ${NSD_GetState} $hStartMsys $startMsys
+
+ StrLen $0 $cloudToken
+ StrLen $1 $cloudRoom
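+  # Only proceed when the token has the expected claiming-token length
+  # (125 characters) and the room ID looks like a 36-character UUID.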
+ ${If} $0 == 125
+  ${AndIf} $1 == 36
+ # We should start our new claiming software here
+ MessageBox MB_OK "$cloudToken | $cloudRoom | $startMsys"
+ ${EndIf}
+
+ ${If} $startMsys == 1
+ nsExec::ExecToLog '$INSTDIR\msys2.exe'
+ pop $0
+ ${EndIf}
+FunctionEnd
+
+Function NetdataUninstallRegistry
+ ClearErrors
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayName" "Netdata - Real-time system monitoring."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayIcon" "$INSTDIR\Uninstall.exe,0"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "UninstallString" "$INSTDIR\Uninstall.exe"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "RegOwner" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "RegCompany" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "Publisher" "Netdata Inc."
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "HelpLink" "https://learn.netdata.cloud/"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "URLInfoAbout" "https://www.netdata.cloud/"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "DisplayVersion" "${CURRVERSION}"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "VersionMajor" "${MAJORVERSION}"
+ WriteRegStr HKLM "${ND_UININSTALL_REG}" \
+ "VersionMinor" "${MINORVERSION}"
+
+ IfErrors 0 +2
+ MessageBox MB_ICONEXCLAMATION|MB_OK "Unable to create an entry in the Control Panel!" IDOK end
+
+ ClearErrors
+ ${GetSize} "$INSTDIR" "/S=0K" $0 $1 $2
+ IntFmt $0 "0x%08X" $0
+ WriteRegDWORD HKLM "${ND_UININSTALL_REG}" "EstimatedSize" "$0"
+
+ IfErrors 0 +2
+ MessageBox MB_ICONEXCLAMATION|MB_OK "Cannot estimate the installation size." IDOK end
+ end:
+FunctionEnd
+
+Section "Install Netdata"
+ SetOutPath $INSTDIR
+ SetCompress off
+
+ File /r "C:\msys64\opt\netdata\*.*"
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe create Netdata binPath= "$INSTDIR\usr\bin\netdata.exe" start= delayed-auto'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to create Netdata service."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe description Netdata "Real-time system monitoring service"'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to add Netdata service description."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe start Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to start Netdata service."
+ ${EndIf}
+
+ WriteUninstaller "$INSTDIR\Uninstall.exe"
+
+ Call NetdataUninstallRegistry
+SectionEnd
+
+Section "Uninstall"
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to stop Netdata service."
+ ${EndIf}
+
+ ClearErrors
+ nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata'
+ pop $0
+ ${If} $0 != 0
+ DetailPrint "Warning: Failed to delete Netdata service."
+ ${EndIf}
+
+ RMDir /r "$INSTDIR"
+
+ DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata"
+SectionEnd
+
diff --git a/packaging/windows/invoke-msys2.ps1 b/packaging/windows/invoke-msys2.ps1
new file mode 100644
index 000000000..bffa7f90b
--- /dev/null
+++ b/packaging/windows/invoke-msys2.ps1
@@ -0,0 +1,16 @@
+# Invoke the specified script using MSYS2
+
+#Requires -Version 4.0
+
+$ErrorActionPreference = "Stop"
+
+. "$PSScriptRoot\functions.ps1"
+
+$msysbash = Get-MSYS2Bash "$msysprefix"
+$env:CHERE_INVOKING = 'yes'
+
+& $msysbash -l $args[0]
+
+if ($LastExitcode -ne 0) {
+ exit 1
+}
diff --git a/packaging/windows/msys2-dependencies.sh b/packaging/windows/msys2-dependencies.sh
new file mode 100755
index 000000000..95a1952df
--- /dev/null
+++ b/packaging/windows/msys2-dependencies.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Install the dependencies we need to build Netdata on MSYS2
+
+. /etc/profile
+
+set -euo pipefail
+
+${GITHUB_ACTIONS+echo "::group::Updating MSYS2"}
+pacman -Syuu --noconfirm
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+${GITHUB_ACTIONS+echo "::group::Installing dependencies"}
+pacman -S --noconfirm --needed \
+ base-devel \
+ cmake \
+ git \
+ liblz4-devel \
+ libutil-linux \
+ libutil-linux-devel \
+ libyaml-devel \
+ libzstd-devel \
+ mingw64/mingw-w64-x86_64-brotli \
+ mingw64/mingw-w64-x86_64-go \
+ mingw64/mingw-w64-x86_64-libuv \
+ mingw64/mingw-w64-x86_64-lz4 \
+ mingw64/mingw-w64-x86_64-nsis \
+ mingw64/mingw-w64-x86_64-openssl \
+ mingw64/mingw-w64-x86_64-pcre2 \
+ mingw64/mingw-w64-x86_64-protobuf \
+ mingw64/mingw-w64-x86_64-zlib \
+ mingw-w64-ucrt-x86_64-toolchain \
+ mingw-w64-x86_64-toolchain \
+ msys2-devel \
+ msys/brotli-devel \
+ msys/libuv-devel \
+ msys/pcre2-devel \
+ msys/zlib-devel \
+ openssl-devel \
+ protobuf-devel \
+ python \
+ ucrt64/mingw-w64-ucrt-x86_64-brotli \
+ ucrt64/mingw-w64-ucrt-x86_64-go \
+ ucrt64/mingw-w64-ucrt-x86_64-libuv \
+ ucrt64/mingw-w64-ucrt-x86_64-lz4 \
+ ucrt64/mingw-w64-ucrt-x86_64-openssl \
+ ucrt64/mingw-w64-ucrt-x86_64-pcre2 \
+ ucrt64/mingw-w64-ucrt-x86_64-protobuf \
+ ucrt64/mingw-w64-ucrt-x86_64-zlib
+${GITHUB_ACTIONS+echo "::endgroup::"}
diff --git a/packaging/windows/package-windows.sh b/packaging/windows/package-windows.sh
new file mode 100755
index 000000000..03f72a692
--- /dev/null
+++ b/packaging/windows/package-windows.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+repo_root="$(dirname "$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd -P)")")"
+
+if [ -n "${BUILD_DIR}" ]; then
+ build="${BUILD_DIR}"
+elif [ -n "${OSTYPE}" ]; then
+ if [ -n "${MSYSTEM}" ]; then
+ build="${repo_root}/build-${OSTYPE}-${MSYSTEM}"
+ else
+ build="${repo_root}/build-${OSTYPE}"
+ fi
+elif [ "$USER" = "vk" ]; then
+ build="${repo_root}/build"
+else
+ build="${repo_root}/build"
+fi
+
+set -exu -o pipefail
+
+${GITHUB_ACTIONS+echo "::group::Installing"}
+cmake --install "${build}"
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+if [ ! -f "/msys2-latest.tar.zst" ]; then
+ ${GITHUB_ACTIONS+echo "::group::Fetching MSYS2 files"}
+ "${repo_root}/packaging/windows/fetch-msys2-installer.py" /msys2-latest.tar.zst
+ ${GITHUB_ACTIONS+echo "::endgroup::"}
+fi
+
+${GITHUB_ACTIONS+echo "::group::Licenses"}
+if [ ! -f "/gpl-3.0.txt" ]; then
+ curl -o /gpl-3.0.txt "https://www.gnu.org/licenses/gpl-3.0.txt"
+fi
+
+if [ ! -f "/cloud.txt" ]; then
+ curl -o /cloud.txt "https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/LICENSE.md"
+fi
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
+${GITHUB_ACTIONS+echo "::group::Packaging"}
+tar -xf /msys2-latest.tar.zst -C /opt/netdata/ || exit 1
+cp -R /opt/netdata/msys64/* /opt/netdata/ || exit 1
+rm -rf /opt/netdata/msys64/
+NDVERSION=$"$(grep 'CMAKE_PROJECT_VERSION:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+NDMAJORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MAJOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+NDMINORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MINOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)"
+
+/mingw64/bin/makensis.exe -DCURRVERSION="${NDVERSION}" -DMAJORVERSION="${NDMAJORVERSION}" -DMINORVERSION="${NDMINORVERSION}" "${repo_root}/packaging/windows/installer.nsi"
+${GITHUB_ACTIONS+echo "::endgroup::"}
+
diff --git a/packaging/windows/package.ps1 b/packaging/windows/package.ps1
new file mode 100644
index 000000000..828e105f1
--- /dev/null
+++ b/packaging/windows/package.ps1
@@ -0,0 +1,16 @@
+# Package the build
+
+#Requires -Version 4.0
+
+$ErrorActionPreference = "Stop"
+
+. "$PSScriptRoot\functions.ps1"
+
+$msysbash = Get-MSYS2Bash "$msysprefix"
+$env:CHERE_INVOKING = 'yes'
+
+& $msysbash -l "$PSScriptRoot\package-windows.sh"
+
+if ($LastExitcode -ne 0) {
+ exit 1
+}
diff --git a/packaging/windows/protoc.bat b/packaging/windows/protoc.bat
new file mode 100644
index 000000000..fe7a76f27
--- /dev/null
+++ b/packaging/windows/protoc.bat
@@ -0,0 +1,9 @@
+@echo off
+::
+:: The problem with /usr/bin/protoc is that it treats colons (:) in its parameters as path separators.
+:: This causes C:/ to be parsed as two paths, C and /, both of which fail.
+:: To overcome this problem, we use bash_execute.sh, which replaces all occurrences of C: with /c.
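+::
+:: For example (hypothetical file names), an invocation such as:
+::   protoc.bat --cpp_out=C:/build C:/src/example.proto
+:: ends up being executed as:
+::   protoc --cpp_out=/c/build /c/src/example.proto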
+::
+set "batch_dir=%~dp0"
+set "batch_dir=%batch_dir:\=/%"
+C:\msys64\usr\bin\bash.exe %batch_dir%/bash_execute.sh protoc %*
diff --git a/packaging/windows/win-build-dir.sh b/packaging/windows/win-build-dir.sh
new file mode 100644
index 000000000..09dd6b977
--- /dev/null
+++ b/packaging/windows/win-build-dir.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+if [ -n "${BUILD_DIR}" ]; then
+ if (echo "${BUILD_DIR}" | grep -q -E "^[A-Z]:\\\\"); then
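+        # BUILD_DIR is a native Windows path (e.g. C:\foo\build); rewrite it as
+        # the equivalent MSYS-style path (/C/foo/build) before using it.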
+ build="$(echo "${BUILD_DIR}" | sed -e 's/\\/\//g' -e 's/^\([A-Z]\):\//\/\1\//' -)"
+ else
+ build="${BUILD_DIR}"
+ fi
+elif [ -n "${OSTYPE}" ]; then
+ if [ -n "${MSYSTEM}" ]; then
+ build="${REPO_ROOT}/build-${OSTYPE}-${MSYSTEM}"
+ else
+ build="${REPO_ROOT}/build-${OSTYPE}"
+ fi
+elif [ "$USER" = "vk" ]; then
+ build="${REPO_ROOT}/build"
+else
+ # shellcheck disable=SC2034
+ build="${REPO_ROOT}/build"
+fi
diff --git a/packaging/windows/windows-openssh-to-msys.bat b/packaging/windows/windows-openssh-to-msys.bat
new file mode 100644
index 000000000..829cb4845
--- /dev/null
+++ b/packaging/windows/windows-openssh-to-msys.bat
@@ -0,0 +1,118 @@
+@echo off
+::
+:: This script will:
+::
+:: 1. install the Windows OpenSSH server (either via dism or by downloading it)
+:: 2. activate the Windows OpenSSH service
+:: 3. open the OpenSSH TCP port in the Windows firewall
+:: 4. create a small batch file that starts an MSYS session
+:: 5. set the default OpenSSH shell to that batch file, so SSH logins start an MSYS session
+::
+:: Problems:
+:: On older Windows versions, terminal emulation is broken.
+:: So, on Windows 10 or Windows Server releases before 2019, the ssh session
+:: will not have proper terminal emulation and will not be usable
+:: for editing files.
+:: For more info check:
+:: https://github.com/PowerShell/Win32-OpenSSH/issues/1260
+::
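+:: Once the script completes, connecting over SSH (for example: ssh user@this-host)
+:: should land you directly in an MSYS2 shell, since msys2.bat is registered as the
+:: default OpenSSH shell.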
+
+:: Check if OpenSSH Server is already installed
+sc query sshd >nul 2>&1
+if %errorlevel% neq 0 (
+ echo "OpenSSH Server not found. Attempting to install via dism..."
+ goto :install_openssh_dism
+) else (
+ echo "OpenSSH Server is already installed."
+ goto :configure_openssh
+)
+
+:: Install OpenSSH using dism
+:install_openssh_dism
+dism /online /Enable-Feature /FeatureName:OpenSSH-Client /All >nul 2>&1
+dism /online /Enable-Feature /FeatureName:OpenSSH-Server /All >nul 2>&1
+
+:: Check if dism succeeded in installing OpenSSH
+sc query sshd >nul 2>&1
+if %errorlevel% neq 0 (
+ echo "OpenSSH installation via dism failed or is unavailable."
+ goto :install_openssh_manual
+) else (
+ echo "OpenSSH installed successfully using dism."
+ goto :configure_openssh
+)
+
+:: Function to Install OpenSSH manually if dism fails
+:install_openssh_manual
+echo "Installing OpenSSH manually..."
+
+:: Download the latest OpenSSH release
+set DOWNLOAD_URL=https://github.com/PowerShell/Win32-OpenSSH/releases/download/v9.5.0.0p1-Beta/OpenSSH-Win64.zip
+set DOWNLOAD_FILE=%temp%\OpenSSH-Win64.zip
+set INSTALL_DIR=C:\Program Files\OpenSSH-Win64
+
+:: Create the installation directory if it doesn't exist
+if not exist "%INSTALL_DIR%" mkdir "%INSTALL_DIR%"
+
+:: Attempt to download OpenSSH using Invoke-WebRequest and TLS configuration
+powershell -Command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; try { Invoke-WebRequest -Uri '%DOWNLOAD_URL%' -OutFile '%DOWNLOAD_FILE%' -UseBasicParsing; exit 0 } catch { exit 1 }"
+if %errorlevel% neq 0 (
+ echo "Invoke-WebRequest download failed. Attempting to download using curl..."
+ curl -L -o "%DOWNLOAD_FILE%" "%DOWNLOAD_URL%"
+ if %errorlevel% neq 0 (
+ echo "Failed to download OpenSSH using curl. Exiting..."
+ exit /b 1
+ )
+)
+
+:: Unzip directly to INSTALL_DIR (flatten the folder structure)
+powershell -Command "Expand-Archive -Path '%DOWNLOAD_FILE%' -DestinationPath '%INSTALL_DIR%' -Force"
+if %errorlevel% neq 0 (
+ echo "Failed to unzip OpenSSH package."
+ exit /b 1
+)
+
+:: Move inner contents to INSTALL_DIR if nested OpenSSH-Win64 folder exists
+if exist "%INSTALL_DIR%\OpenSSH-Win64" (
+ xcopy "%INSTALL_DIR%\OpenSSH-Win64\*" "%INSTALL_DIR%\" /s /e /y
+ rmdir "%INSTALL_DIR%\OpenSSH-Win64" /s /q
+)
+
+:: Add the OpenSSH binaries to the system PATH
+setx /M PATH "%INSTALL_DIR%;%PATH%"
+
+:: Register OpenSSH utilities as services using PowerShell
+powershell -ExecutionPolicy Bypass -Command "& '%INSTALL_DIR%\install-sshd.ps1'"
+
+:: Verify if manual installation succeeded
+sc query sshd >nul 2>&1
+if %errorlevel% neq 0 (
+ echo "Manual OpenSSH installation failed. Exiting..."
+ exit /b 1
+) else (
+ echo "OpenSSH installed successfully manually."
+ goto :configure_openssh
+)
+
+:configure_openssh
+:: Ensure OpenSSH Server service is set to start automatically and start the service
+sc config sshd start= auto
+net start sshd
+
+:: Create msys2.bat file with specific content
+set MSYS2_PATH=C:\msys64
+if not exist "%MSYS2_PATH%" (
+ echo "Error: %MSYS2_PATH% does not exist."
+ exit /b 1
+)
+
+echo @%MSYS2_PATH%\msys2_shell.cmd -defterm -here -no-start -msys > %MSYS2_PATH%\msys2.bat
+
+:: Run PowerShell command to set default shell
+powershell -Command "New-ItemProperty -Path 'HKLM:\SOFTWARE\OpenSSH' -Name 'DefaultShell' -Value '%MSYS2_PATH%\msys2.bat' -PropertyType String -Force"
+
+:: Open the Windows Firewall for sshd (using PowerShell)
+powershell -Command "New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd) Incoming' -Description 'Allow incoming SSH traffic via OpenSSH server' -Enabled True -Direction Inbound -Protocol TCP -LocalPort 22 -Action Allow"
+
+echo "OpenSSH has been successfully configured with MSYS2 as the default shell, and the firewall has been opened for sshd."
+pause
diff --git a/packaging/yaml.checksums b/packaging/yaml.checksums
deleted file mode 100644
index 563c273d4..000000000
--- a/packaging/yaml.checksums
+++ /dev/null
@@ -1 +0,0 @@
-c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4 yaml-0.2.5.tar.gz
diff --git a/packaging/yaml.version b/packaging/yaml.version
deleted file mode 100644
index 3a4036fb4..000000000
--- a/packaging/yaml.version
+++ /dev/null
@@ -1 +0,0 @@
-0.2.5