From 574098461cd45be12a497afbdac6f93c58978387 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 3 Sep 2019 12:23:38 +0200 Subject: Adding upstream version 1.17.0. Signed-off-by: Daniel Baumann --- .travis/README.md | 143 ------------------ .travis/check_changelog_last_modification.sh | 21 --- .travis/create_artifacts.sh | 59 -------- .travis/draft_release.sh | 65 --------- .travis/gcs-credentials.json.enc | Bin 2320 -> 0 bytes .travis/generate_changelog_and_tag_release.sh | 67 --------- .travis/generate_changelog_for_nightlies.sh | 70 --------- .travis/generate_changelog_for_release.sh | 36 ----- .travis/labeler.sh | 82 ----------- .travis/nightlies.sh | 49 ------- .travis/package_management/build.sh | 31 ---- .../build_package_in_container.sh | 82 ----------- .travis/package_management/common.py | 162 --------------------- .../configure_deb_lxc_environment.py | 95 ------------ .../configure_rpm_lxc_environment.py | 102 ------------- .travis/package_management/create_lxc_for_build.sh | 101 ------------- .travis/package_management/functions.sh | 33 ----- .../package_management/package_cloud_wrapper.sh | 48 ------ .travis/package_management/prepare_packages.sh | 63 -------- .../package_management/trigger_deb_lxc_build.py | 73 ---------- .../package_management/trigger_rpm_lxc_build.py | 55 ------- .travis/package_management/yank_stale_rpm.sh | 35 ----- .travis/run_install_with_dist_file.sh | 48 ------ .travis/tagger.sh | 77 ---------- 24 files changed, 1597 deletions(-) delete mode 100644 .travis/README.md delete mode 100755 .travis/check_changelog_last_modification.sh delete mode 100755 .travis/create_artifacts.sh delete mode 100755 .travis/draft_release.sh delete mode 100644 .travis/gcs-credentials.json.enc delete mode 100755 .travis/generate_changelog_and_tag_release.sh delete mode 100755 .travis/generate_changelog_for_nightlies.sh delete mode 100755 .travis/generate_changelog_for_release.sh delete mode 100755 .travis/labeler.sh delete mode 100755 .travis/nightlies.sh delete mode 100644 .travis/package_management/build.sh delete mode 100755 .travis/package_management/build_package_in_container.sh delete mode 100755 .travis/package_management/common.py delete mode 100755 .travis/package_management/configure_deb_lxc_environment.py delete mode 100755 .travis/package_management/configure_rpm_lxc_environment.py delete mode 100755 .travis/package_management/create_lxc_for_build.sh delete mode 100644 .travis/package_management/functions.sh delete mode 100755 .travis/package_management/package_cloud_wrapper.sh delete mode 100755 .travis/package_management/prepare_packages.sh delete mode 100755 .travis/package_management/trigger_deb_lxc_build.py delete mode 100755 .travis/package_management/trigger_rpm_lxc_build.py delete mode 100755 .travis/package_management/yank_stale_rpm.sh delete mode 100755 .travis/run_install_with_dist_file.sh delete mode 100755 .travis/tagger.sh (limited to '.travis') diff --git a/.travis/README.md b/.travis/README.md deleted file mode 100644 index 3b314fa18..000000000 --- a/.travis/README.md +++ /dev/null @@ -1,143 +0,0 @@ -# Description of CI build configuration - -## Variables needed by travis - -- GITHUB_TOKEN - GitHub token with push access to repository -- DOCKER_USERNAME - Username (netdatabot) with write access to docker hub repository -- DOCKER_PWD - Password to docker hub -- encrypted_8daf19481253_key - key needed by openssl to decrypt GCS credentials file -- encrypted_8daf19481253_iv - IV needed by openssl to decrypt GCS credentials file -- 
COVERITY_SCAN_TOKEN - Token to allow coverity test analysis uploads -- SLACK_USERNAME - This is required for the slack notifications triggered by travis pipeline -- SLACK_CHANNEL - This is the channel that Travis will be posting messages to -- SLACK_NOTIFY_WEBHOOK_URL - This is the incoming URL webhook as provided by slack integration. Visit Apps integration in slack to generate the required hook -- SLACK_BOT_NAME - This is the name your bot will appear with on slack - -## CI workflow details -Our CI pipeline is designed to help us identify and mitigate risks at all stages of implementation. -To accommodate this need, we used [Travis CI](http://www.travis-ci.com) as our CI/CD tool. -Our main areas of concern are: -1) Only push code that is working. That means fail fast so that we can improve before we reach the public - -2) Reduce the time to market to a minimum, by streamlining the release process. - That means a lot of testing, a lot of consistency checks, a lot of validations - -3) Generated artifacts consistency. We should not allow broken software to reach the public. - When this happens, it's embarrassing and we struggle to eliminate it. - -4) We are an innovative company, so we love to automate :) - - -Having said that, here's a brief introduction to Netdata's improved CI/CD pipeline with Travis. -Our CI/CD lifecycle contains three different execution entry points: -1) A user opens a pull request to netdata/master: Travis will run a pipeline on the branch under that PR -2) A merge or commit happens on netdata/master. This will trigger travis to run, but we have two distinct cases in this scenario: - a) A user merges a pull request to netdata/master: Travis will run on master, after the merge. - b) A user runs a commit/merge with a special keyword (mentioned later). - This triggers a release for either minor, major or release candidate versions, depending on the keyword -3) A scheduled job runs on master once per day: Travis will run on master at the scheduled interval - -To accommodate all three entry points our CI/CD workflow has a set of steps that run on all three entry points. -Once all these steps are successful, our pipeline executes another subset of steps for entry points 2 and 3. -In travis terms the "steps" are "Stages" and within each stage we execute a set of activities called "jobs" in travis. - -### Always run: Stages that run on all three execution entry points - -## Code quality, linting, syntax, code style -At this early stage we iterate through a set of basic quality control checks: -- Shell checking: Run linters for our various BASH scripts -- Checksum validators: Run validators to ensure our installers and documentation are in sync -- Dashboard validator: We provide a pre-generated dashboard.js script file and we need to make sure it is up to date. We validate that. - -## Build process -At this stage, basically, we build :-) -We do a baseline check of our build artifacts to guarantee they are not broken, as sketched below. -Briefly our activities include: -- Verify docker builds successfully -- Run the standard netdata installer, to make sure we build & run properly -- Do the same through 'make dist', as this is our stable channel for our kickstart files
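A minimal sketch of the kind of baseline check this stage performs (illustrative only; the exact commands and flags live in our `.travis.yml`, so treat the installer flags below as assumptions, not the literal job definitions):

```sh
# Illustrative baseline build check, not the literal Travis job definitions
docker build -t netdata:ci-test .                    # does the Docker image still build?
./netdata-installer.sh --dont-wait --dont-start-it   # does the standard installer still complete? (flags assumed)
autoreconf -ivf && ./configure && make dist          # does 'make dist' still produce a source tarball?
```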
-## Artifacts validation -At this point we know our software is building; we need to go through a set of checks to guarantee -that our product meets certain expectations. At the current stage, we are focusing on basic capabilities -like installing on different distributions, running the full lifecycle of install-run-update-install and so on. -We are still working on enriching this with more and more use cases, to get us closer to achieving full stability of our software. -Briefly we currently evaluate the following activities: -- Basic software unit testing -- Non containerized build and install on ubuntu 14.04 -- Non containerized build and install on ubuntu 18.04 -- Running the full netdata lifecycle (install, update, uninstall) on ubuntu 18.04 -- Build and install on CentOS 6 -- Build and install on CentOS 7 -(More to come) - -### Nightly operations: Stages that run daily under cronjob -The nightly stages are related to the daily nightly activities that produce our daily latest releases. -We also maintain a couple of cronjobs that run during the night to provide us with deeper insights, -like for example coverity scanning or extended kickstart checksum checks. - -## Nightly operations -At this stage we run scheduled jobs and execute the nightly changelog generator, coverity scans, -labeler for our issues and extended kickstart files checksum validations. - -## Nightly release -During this stage we build and publish the latest docker images, prepare the nightly artifacts -and deploy them to our Google Cloud service provider. - - -### Publishing -Publishing is responsible for executing the major/minor/patch releases and is separated -into two stages: the packaging preparation process and publishing. - -## Packaging for release -During packaging we prepare the release changelog information and run the labeler. - -## Publish for release -The publishing stage is the most complex part of the release. This is the stage where we generate and publish docker images, -prepare the release artifacts and get ready with the release draft. - -### Package Management workflows -As part of our goal to provide the best support to our customers, we have created a set of CI workflows to automatically produce -DEB and RPM packages for multiple distributions. These workflows are implemented under the templated stages '_DEB_TEMPLATE' and '_RPM_TEMPLATE'. -We currently plan to actively support the following Operating Systems, with a plan to further expand this list following our users' needs. - -### Operating systems supported -The following distributions are supported: -- Debian versions - - Buster (TBD - not released yet, check [debian releases](https://www.debian.org/releases/) for details) - - Stretch - - Jessie - - Wheezy - -- Ubuntu versions - - Disco - - Cosmic - - Bionic - - Artful - -- Enterprise Linux versions (covers Red Hat, CentOS, and Amazon Linux with version 6) - - Version 8 (TBD) - - Version 7 - - Version 6 - -- Fedora versions - - Version 31 (TBD) - - Version 30 - - Version 29 - - Version 28 - -- openSUSE versions - - 15.1 - - 15.0 - -- Gentoo distributions - - TBD - -### Architectures supported -We plan to support the amd64, x86 and arm64 architectures. As of June 2019 only amd64 and x86 will become available, as we are still working on solving issues with the arm64 architecture. - -The Package deployment can be triggered manually by executing an empty commit with the following message pattern: `[Package PACKAGE_TYPE PACKAGE_ARCH] DESCRIBE_THE_REASONING_HERE`.
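For example, a maintainer could request a rebuild of the amd64 DEB packages for the current stable release with an empty commit along these lines (a sketch only; the accepted keyword combinations are listed right below):

```sh
# Hypothetical trigger commit; detect_arch_from_commit() in .travis/package_management/functions.sh
# reads the architecture keyword out of this commit message
git commit --allow-empty -m "[Package amd64 DEB] Rebuild amd64 DEB packages for v1.15.0"
git push origin master
```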
-Travis Yaml configuration allows the user to combine package type and architecture as necessary to regenerate the current stable release (For example tag v1.15.0 as of 4th of May 2019) -Sample patterns to trigger building of packages for all amd64 supported architecture: -- '[Package amd64 RPM]': Build & publish all amd64 available RPM packages -- '[Package amd64 DEB]': Build & publish all amd64 available DEB packages diff --git a/.travis/check_changelog_last_modification.sh b/.travis/check_changelog_last_modification.sh deleted file mode 100755 index 2665c0627..000000000 --- a/.travis/check_changelog_last_modification.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -set -e - -LAST_MODIFICATION="$(git log -1 --pretty="format:%at" CHANGELOG.md)" -CURRENT_TIME="$(date +"%s")" -TWO_DAYS_IN_SECONDS=172800 - -DIFF=$((CURRENT_TIME - LAST_MODIFICATION)) - -echo "Checking CHANGELOG.md last modification time on GIT.." -echo "CHANGELOG.md timestamp: ${LAST_MODIFICATION}" -echo "Current timestamp: ${CURRENT_TIME}" -echo "Diff: ${DIFF}" - -if [ ${DIFF} -gt ${TWO_DAYS_IN_SECONDS} ]; then - echo "CHANGELOG.md is more than two days old!" - post_message "TRAVIS_MESSAGE" "Hi , CHANGELOG.md was found more than two days old (Diff: ${DIFF} seconds)" "${NOTIF_CHANNEL}" -else - echo "CHANGELOG.md is less than two days old, fine" -fi diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh deleted file mode 100755 index 9670f229a..000000000 --- a/.travis/create_artifacts.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -# -# Artifacts creation script. -# This script generates two things: -# 1) The static binary that can run on all linux distros (built-in dependencies etc) -# 2) The distribution source tarbal -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author: Paul Emm. Katsoulakis -# -# shellcheck disable=SC2230 - -set -e - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup || echo "") -if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository" - exit 1 -fi - -if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - echo "Beta mode on ${TRAVIS_REPO_SLUG}, not running anything here" - exit 0 -fi; - - -echo "--- Initialize git configuration ---" -git checkout "${1-master}" -git pull - -# Everything from this directory will be uploaded to GCS -mkdir -p artifacts -BASENAME="netdata-$(git describe)" - -# Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads. -# See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details. 
-python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' -echo "--- Create tarball ---" -autoreconf -ivf -./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2 -make dist -mv "${BASENAME}.tar.gz" artifacts/ - -echo "--- Create self-extractor ---" -./packaging/makeself/build-x86_64-static.sh - -# Needed for GCS -echo "--- Copy artifacts to separate directory ---" -#shellcheck disable=SC2164 -cp packaging/version artifacts/latest-version.txt -cd artifacts -ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz -ln -s "${BASENAME}.gz.run" netdata-latest.gz.run -sha256sum -b ./* >"sha256sums.txt" -echo "checksums:" -cat sha256sums.txt diff --git a/.travis/draft_release.sh b/.travis/draft_release.sh deleted file mode 100755 index ddc0f9ad5..000000000 --- a/.travis/draft_release.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash -# -# Draft release generator. -# This utility is responsible for submitting a draft release to github repo -# It is agnostic of other processes, when executed it will draft a release, -# based on the most recent reachable tag. -# -# Requirements: -# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo -# - artifacts directory in place -# - The directory is created by create_artifacts.sh mechanism -# - The artifacts need to be created with the same tag, obviously -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author: Pavlos Emm. Katsoulakis - -set -e - -if [ ! -f .gitignore ]; then - echo "Run as ./travis/$(basename "$0") from top level directory of git repository" - exit 1 -fi - -echo "--- Initialize git configuration ---" -git checkout master -git pull - - -if [[ $(git describe) =~ -rc* ]]; then - echo "This is a release candidate tag, we do not generate a release draft" - exit 0 -fi - -# Load the tag, if any -GIT_TAG=$(git describe) - -if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - echo "Beta mode on ${TRAVIS_REPO_SLUG}, i was about to run for release (${GIT_TAG}), but i am emulating, so bye" - exit 0 -fi; - -echo "---- CREATING RELEASE DRAFT WITH ASSETS -----" -# Download hub -HUB_VERSION=${HUB_VERSION:-"2.5.1"} -wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz" -tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz" -export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin" - -# Create a release draft -if [ -z ${GIT_TAG+x} ]; then - echo "Variable GIT_TAG is not set. Something went terribly wrong! Exiting." - exit 1 -fi -if [ "${GIT_TAG}" != "$(git tag --points-at)" ]; then - echo "ERROR! Current commit is not tagged. Stopping release creation." 
- exit 1 -fi -until hub release create --draft \ - -a "artifacts/netdata-${GIT_TAG}.tar.gz" \ - -a "artifacts/netdata-${GIT_TAG}.gz.run" \ - -a "artifacts/sha256sums.txt" \ - -m "${GIT_TAG}" "${GIT_TAG}"; do - sleep 5 -done diff --git a/.travis/gcs-credentials.json.enc b/.travis/gcs-credentials.json.enc deleted file mode 100644 index 5d1e7b2dd..000000000 Binary files a/.travis/gcs-credentials.json.enc and /dev/null differ diff --git a/.travis/generate_changelog_and_tag_release.sh b/.travis/generate_changelog_and_tag_release.sh deleted file mode 100755 index fb155b264..000000000 --- a/.travis/generate_changelog_and_tag_release.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# -# Script to automatically do a couple of things: -# - generate a new tag according to semver (https://semver.org/) -# - generate CHANGELOG.md by using https://github.com/skywinder/github-changelog-generator -# -# Tags are generated by searching for a keyword in last commit message. Keywords are: -# - [patch] or [fix] to bump patch number -# - [minor], [feature] or [feat] to bump minor number -# - [major] or [breaking change] to bump major number -# All keywords MUST be surrounded with square braces. -# -# Script uses git mechanisms for locking, so it can be used in parallel builds -# -# Requirements: -# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo -# - docker -# -# This is a modified version of: -# https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author: Pavlos Emm. Katsoulakis -# Author: Pawel Krupa (@paulfantom) -set -e - -if [ ! -f .gitignore ]; then - echo "Run as ./travis/$(basename "$0") from top level directory of git repository" - exit 1 -fi - -echo "--- Executing Tagging facility to determine TAG ---" -source .travis/tagger.sh - -echo "--- Changelog generator and tagger script starting ---" -# If tagger script hasn't produced a TAG, there is nothing to do so bail out happy -if [ -z "${GIT_TAG}" ]; then - echo "GIT_TAG is empty, nothing to do for now (Value: $GIT_TAG)" - exit 0 -fi - -if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing to do on the changelog generator and tagging script for (${GIT_TAG}), bye" - exit 0 -fi - -echo "--- Initialize git configuration ---" -export GIT_MAIL="bot@netdata.cloud" -export GIT_USER="netdatabot" -git checkout master -git pull - -echo "---- UPDATE VERSION FILE ----" -echo "$GIT_TAG" >packaging/version -git add packaging/version - -echo "---- GENERATE CHANGELOG -----" -./.travis/generate_changelog_for_release.sh -git add CHANGELOG.md - -echo "---- COMMIT AND PUSH CHANGES ----" -git commit -m "[ci skip] release $GIT_TAG" --author "${GIT_USER} <${GIT_MAIL}>" -git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no. $TRAVIS_BUILD_NUMBER" -git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" -git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags -# After those operations output of command `git describe` should be identical with a value of GIT_TAG diff --git a/.travis/generate_changelog_for_nightlies.sh b/.travis/generate_changelog_for_nightlies.sh deleted file mode 100755 index b90862880..000000000 --- a/.travis/generate_changelog_for_nightlies.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -# -# Changelog generation scriptlet. 
-# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pawel Krupa (paulfantom) -# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) -set -e - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup || echo "") -if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository" - echo "Changelog generation process aborted" - exit 1 -fi - -LAST_TAG="$1" -COMMITS_SINCE_RELEASE="$2" -NEW_VERSION="${LAST_TAG}-$((COMMITS_SINCE_RELEASE + 1))-nightly" -ORG=$(echo "$TRAVIS_REPO_SLUG" | cut -d '/' -f1) -PROJECT=$(echo "$TRAVIS_REPO_SLUG" | cut -d '/' -f 2) -GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"} -GIT_USER=${GIT_USER:-"netdatabot"} -PUSH_URL=$(git config --get remote.origin.url | sed -e 's/^https:\/\///') -FAIL=0 -if [ -z ${GIT_TAG+x} ]; then - OPTS="" -else - OPTS="--future-release ${GIT_TAG}" -fi -echo "We got $COMMITS_SINCE_RELEASE changes since $LAST_TAG, re-generating changelog" - -if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do here" - exit 0 -fi - -git checkout master -git pull - -echo "Running project markmandel for github changelog generation" -#docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \ -docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \ - --user "${ORG}" \ - --project "${PROJECT}" \ - --token "${GITHUB_TOKEN}" \ - --since-tag "v1.10.0" \ - --unreleased-label "**Next release**" \ - --no-issues \ - --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \ - --no-compare-link ${OPTS} - -echo "Changelog created! Adding packaging/version(${NEW_VERSION}) and CHANGELOG.md to the repository" -echo "${NEW_VERSION}" > packaging/version -git add packaging/version && echo "1) Added packaging/version to repository" || FAIL=1 -git add CHANGELOG.md && echo "2) Added changelog file to repository" || FAIL=1 -git commit -m '[ci skip] create nightly packages and update changelog' --author "${GIT_USER} <${GIT_MAIL}>" && echo "3) Committed changes to repository" || FAIL=1 -git push "https://${GITHUB_TOKEN}:@${PUSH_URL}" && echo "4) Pushed changes to remote ${PUSH_URL}" || FAIL=1 - -# In case of a failure, wrap it up and bail out cleanly -if [ $FAIL -eq 1 ]; then - git clean -xfd - echo "Changelog generation failed during github UPDATE!" - exit 1 -fi - -echo "Changelog generation completed successfully!" diff --git a/.travis/generate_changelog_for_release.sh b/.travis/generate_changelog_for_release.sh deleted file mode 100755 index de5d3e702..000000000 --- a/.travis/generate_changelog_for_release.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -e - -if [ ! -f .gitignore ]; then - echo "Run as ./travis/$(basename "$0") from top level directory of git repository" - exit 1 -fi - -ORGANIZATION=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $1}') -PROJECT=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $2}') -GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"} -GIT_USER=${GIT_USER:-"netdatabot"} -if [ -z ${GIT_TAG+x} ]; then - OPTS="" -else - OPTS="--future-release ${GIT_TAG}" -fi - -if [ ! 
"${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do" - exit 0 -fi - -echo "--- Creating changelog ---" -git checkout master -git pull -#docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \ -docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \ - --user "${ORGANIZATION}" \ - --project "${PROJECT}" \ - --token "${GITHUB_TOKEN}" \ - --since-tag "v1.10.0" \ - --unreleased-label "**Next release**" \ - --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \ - --no-compare-link ${OPTS} diff --git a/.travis/labeler.sh b/.travis/labeler.sh deleted file mode 100755 index 7863084d9..000000000 --- a/.travis/labeler.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -# This is a simple script which should apply labels to unlabelled issues from last 3 days. -# It will soon be deprecated by GitHub Actions so no futher development on it is planned. - -# Previously this was using POST to only add labels. But this method seems to be failing with larger number of requests -new_labels() { - ISSUE="$1" - URL="https://api.github.com/repos/netdata/netdata/issues/$ISSUE/labels" - # deduplicate array and add quotes - SET=( $(for i in "${@:2}"; do [ "$i" != "" ] && echo "\"$i\""; done | sort -u) ) - # implode array to string - LABELS="${SET[*]}" - # add commas between quotes (replace spaces) - LABELS="${LABELS//\" \"/\",\"}" - # remove duplicate quotes in case parameters were already quoted - LABELS="${LABELS//\"\"/\"}" - echo "-------- Assigning labels to #${ISSUE}: ${LABELS} --------" - curl -H "Authorization: token $GITHUB_TOKEN" -d "{\"labels\":[${LABELS}]}" -X PUT "${URL}" &>/dev/null -} - -if [ "$GITHUB_TOKEN" == "" ]; then - echo "GITHUB_TOKEN is needed" - exit 1 -fi - -if ! [ -x "$(command -v hub)" ]; then - echo "===== Download HUB =====" - HUB_VERSION=${HUB_VERSION:-"2.5.1"} - wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz" - tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz" &>/dev/null - export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin" -fi - -echo "===== Looking up available labels =====" -LABELS_FILE=/tmp/labels -hub issue labels >$LABELS_FILE - -# Change all 'area' labels assigned to PR saving non-area labels. 
-echo "===== Categorizing PRs =====" -NEW_LABELS=/tmp/new_labels -for PR in $(hub pr list -s all -f "%I%n" -L 10); do - echo "----- Processing PR #$PR -----" - echo "" >$NEW_LABELS - NEW_SET="" - DIFF_URL="https://github.com/netdata/netdata/pull/$PR.diff" - for FILE in $(curl -L "${DIFF_URL}" 2>/dev/null | grep "diff --git a/" | cut -d' ' -f3 | sort | uniq); do - LABEL="" - case "${FILE}" in - *".md") AREA="docs" ;; - *"/collectors/python.d.plugin/"*) AREA="external/python" ;; - *"/collectors/charts.d.plugin/"*) AREA="external" ;; - *"/collectors/node.d.plugin/"*) AREA="external" ;; - *"/.travis"*) AREA="ci" ;; - *"/.github/*.md"*) AREA="docs" ;; - *"/.github/"*) AREA="ci" ;; - *"/build/"*) AREA="packaging" ;; - *"/contrib/"*) AREA="packaging" ;; - *"/diagrams/"*) AREA="docs" ;; - *"/installer/"*) AREA="packaging" ;; - *"/makeself/"*) AREA="packaging" ;; - *"/system/"*) AREA="packaging" ;; - *"/netdata-installer.sh"*) AREA="packaging" ;; - *) AREA=$(echo "$FILE" | cut -d'/' -f2) ;; - esac - LABEL="area/$AREA" - echo "Selecting $LABEL due to $FILE" - if grep "$LABEL" "$LABELS_FILE"; then - echo "$LABEL" >>$NEW_LABELS - if [[ $LABEL =~ "external" ]]; then - echo "area/collectors" >>$NEW_LABELS - fi - else - echo "-------- Label '$LABEL' not available --------" - fi - done - NEW_SET=$(sort $NEW_LABELS | uniq) - if [ ! -z "$NEW_SET" ]; then - PREV=$(curl -H "Authorization: token $GITHUB_TOKEN" "https://api.github.com/repos/netdata/netdata/issues/$PR/labels" 2>/dev/null | jq '.[].name' | grep -v "area") - new_labels "$PR" ${NEW_SET} "${PREV[*]}" - fi -done diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh deleted file mode 100755 index 002461041..000000000 --- a/.travis/nightlies.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# -# This is the nightly changelog generation script -# It is responsible for two major activities: -# 1) Update packaging/version with the current nightly version -# 2) Generate the changelog for the mentioned version -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pawel Krupa (paulfantom) -# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) -set -e - -FAIL=0 - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup || echo "") -if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository" - echo "Changelog generation process aborted" - exit 1 -fi - -LAST_TAG=$(git describe --abbrev=0 --tags) -COMMITS_SINCE_RELEASE=$(git rev-list "${LAST_TAG}"..HEAD --count) -PREVIOUS_NIGHTLY_COUNT="$(rev contrib/debian" -ln -sf contrib/debian debian - -echo "Executing dpkg-buildpackage" -if dpkg-buildpackage --version 2> /dev/null | grep -q "1.18"; then - dpkg-buildpackage --post-clean --pre-clean --build=binary -else - dpkg-buildpackage -b -fi - -echo "DEB build script completed!" diff --git a/.travis/package_management/build_package_in_container.sh b/.travis/package_management/build_package_in_container.sh deleted file mode 100755 index 95a68e7a8..000000000 --- a/.travis/package_management/build_package_in_container.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash -# -# Entry point for package build process -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. 
Katsoulakis (paul@netdata.cloud) -#shellcheck disable=SC1091 -set -e - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup) -if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository" - echo "Docker build process aborted" - exit 1 -fi - -source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1) - -# Check for presence of mandatory environment variables -if [ -z "${BUILD_STRING}" ]; then - echo "No Distribution was defined. Make sure BUILD_STRING is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILDER_NAME}" ]; then - echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILD_DISTRO}" ]; then - echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILD_RELEASE}" ]; then - echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script" - exit 1 -fi - -if [ -z "${PACKAGE_TYPE}" ]; then - echo "No build release information defined. Make sure PACKAGE_TYPE is set on the environment before running this script" - exit 1 -fi - -# Detect architecture and load extra variables needed -detect_arch_from_commit - -case "${BUILD_ARCH}" in -"all") - echo "* * * Building all architectures, amd64 and i386 * * *" - echo "Building for amd64.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}" - - echo "Building for arm64.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}" - - echo "Building for i386.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}" - - ;; -"amd64"|"arm64"|"i386") - echo "Building for ${BUILD_ARCH}.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}" - ;; -*) - echo "Unknown build architecture '${BUILD_ARCH}', nothing to do for build" - exit 1 - ;; -esac - -echo "Build process completed!" diff --git a/.travis/package_management/common.py b/.travis/package_management/common.py deleted file mode 100755 index 6e7a26023..000000000 --- a/.travis/package_management/common.py +++ /dev/null @@ -1,162 +0,0 @@ -# -# -# Python library with commonly used functions within the package management scope -# -# Author : Pavlos Emm. 
Katsoulakis - -import lxc -import subprocess -import os - -def fetch_version(orig_build_version): - tag = None - friendly_version = "" - - # TODO: Checksum validations - if str(orig_build_version).count(".latest") == 1: - version_list=str(orig_build_version).replace('v', '').split('.') - friendly_version='.'.join(version_list[0:2]) + "." + version_list[3] - else: - friendly_version = orig_build_version.replace('v', '') - tag = friendly_version # Go to stable tag - print("Version set to %s from %s" % (friendly_version, orig_build_version)) - - return friendly_version, tag - -def replace_tag(tag_name, spec, new_tag_content): - print("Fixing tag %s in %s" % (tag_name, spec)) - - ifp = open(spec, "r") - config = ifp.readlines() - ifp.close() - - source_line = -1 - for line in config: - if str(line).count(tag_name + ":") > 0: - source_line = config.index(line) - print("Found line: %s in item %d" % (line, source_line)) - break - - if source_line >= 0: - print("Replacing line %s with %s in spec file" %(config[source_line], new_tag_content)) - config[source_line] = "%s: %s\n" % (tag_name, new_tag_content) - config_str = ''.join(config) - ofp = open(spec, 'w') - ofp.write(config_str) - ofp.close() - -def run_command(container, command): - print("Running command: %s" % command) - command_result = container.attach_wait(lxc.attach_run_command, command) - - if command_result != 0: - raise Exception("Command failed with exit code %d" % command_result) - -def run_command_in_host(cmd): - print("Issue command in host: %s" % str(cmd)) - - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - o, e = proc.communicate() - print('Output: ' + o.decode('ascii')) - print('Error: ' + e.decode('ascii')) - print('code: ' + str(proc.returncode)) - -def prepare_repo(container): - if str(os.environ["REPO_TOOL"]).count("zypper") == 1: - run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"]) - run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"]) - - elif str(os.environ["REPO_TOOL"]).count("yum") == 1: - run_command(container, [os.environ["REPO_TOOL"], "clean", "all"]) - run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - - if os.environ["BUILD_STRING"].count("el/7") == 1 and os.environ["BUILD_ARCH"].count("i386") == 1: - print ("Skipping epel-release install for %s-%s" % (os.environ["BUILD_STRING"], os.environ["BUILD_ARCH"])) - else: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"]) - - elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: - run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - else: - run_command(container, [os.environ["REPO_TOOL"], "update", "-y"]) - - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"]) - -def install_common_dependendencies(container): - if str(os.environ["REPO_TOOL"]).count("zypper") == 1: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", 
"protobuf-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c"]) - - elif str(os.environ["REPO_TOOL"]).count("yum") == 1: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) - - elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "g++"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libipmimonitoring-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libjson-c-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libsnappy-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotobuf-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotoc-dev"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) - if os.environ["BUILD_STRING"].count("debian/jessie") == 1: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy"]) - else: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"]) - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"]) - - if os.environ["BUILD_STRING"].count("el/6") <= 0: - run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autogen"]) - -def prepare_version_source(dest_archive, pkg_friendly_version, tag=None): - print(".0 Preparing local implementation tarball for version %s" % pkg_friendly_version) - tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive - - if tag is not None: - print(".1 Checking out tag %s" % tag) - run_command_in_host(['git', 'fetch', '--all']) - - # TODO: Keep in mind that tricky 'v' there, needs to be removed once we clear our versioning scheme - run_command_in_host(['git', 'checkout', 'v%s' % pkg_friendly_version]) - - print(".2 Tagging the code with version: %s" % pkg_friendly_version) - run_command_in_host(['git', 'tag', '-a', pkg_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]]) - - print(".3 Run autoreconf -ivf") - run_command_in_host(['autoreconf', '-ivf']) - - print(".4 Run configure") - run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata']) - - print(".5 Run make dist") - 
run_command_in_host(['make', 'dist']) - - print(".6 Copy generated tarbal to desired path") - if os.path.exists('netdata-%s.tar.gz' % pkg_friendly_version): - run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % pkg_friendly_version, tar_file]) - - print(".7 Fixing permissions on tarball") - run_command_in_host(['sudo', 'chmod', '777', tar_file]) - else: - print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % pkg_friendly_version) - sys.exit(1) diff --git a/.travis/package_management/configure_deb_lxc_environment.py b/.travis/package_management/configure_deb_lxc_environment.py deleted file mode 100755 index 12328dde7..000000000 --- a/.travis/package_management/configure_deb_lxc_environment.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -# -# Prepare the build environment within the container -# The script attaches to the running container and does the following: -# 1) Create the container -# 2) Start the container up -# 3) Create the builder user -# 4) Prepare the environment for DEB build -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. Katsoulakis - -import common -import os -import sys -import lxc - -if len(sys.argv) != 2: - print('You need to provide a container name to get things started') - sys.exit(1) -container_name=sys.argv[1] - -# Setup the container object -print("Defining container %s" % container_name) -container = lxc.Container(container_name) -if not container.defined: - raise Exception("Container %s not defined!" % container_name) - -# Start the container -if not container.start(): - raise Exception("Failed to start the container") - -if not container.running or not container.state == "RUNNING": - raise Exception('Container %s is not running, configuration process aborted ' % container_name) - -# Wait for connectivity -print("Waiting for container connectivity to start configuration sequence") -if not container.get_ips(timeout=30): - raise Exception("Timeout while waiting for container") - -build_path = "/home/%s" % os.environ['BUILDER_NAME'] - -# Run the required activities now -# 1. Create the builder user -print("1. Adding user %s" % os.environ['BUILDER_NAME']) -common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) - -# Fetch package dependencies for the build -print("2. 
Preparing repo on LXC container") -common.prepare_repo(container) - -print("2.1 Install .DEB build support packages") -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dpkg-dev"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libdistro-info-perl"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-make"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-systemd"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-autoreconf"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "git-buildpackage"]) - -print("2.2 Add more dependencies") -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter-acct-dev"]) -common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"]) - -print ("3.1 Run install-required-packages scriptlet") -common.run_command(container, ["wget", "-T", "15", "-O", "%s/.install-required-packages.sh" % build_path, "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "%s/.install-required-packages.sh" % build_path, "netdata", "--dont-wait", "--non-interactive"]) - -print("3.2 Installing package dependencies within LXC container") -common.install_common_dependendencies(container) - -friendly_version="" -dest_archive="" -download_url="" -tag = None -friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) - -tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), friendly_version) - -print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION']) -dest_archive="%s/netdata-%s.tar.gz" % (build_path, friendly_version) - -if str(os.environ["BUILD_STRING"]).count("debian/jessie") == 1: - print("5.1 We are building for Jessie, adjusting control file") - common.run_command_in_host(['sudo', 'rm', 'contrib/debian/control']) - common.run_command_in_host(['sudo', 'cp', 'contrib/debian/control.jessie', 'contrib/debian/control']) - -common.prepare_version_source(dest_archive, friendly_version, tag=tag) - -print("6. Installing build.sh script to build path") -common.run_command_in_host(['sudo', 'cp', '.travis/package_management/build.sh', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) -common.run_command_in_host(['sudo', 'chmod', '777', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)]) -common.run_command_in_host(['sudo', 'ln', '-sf', 'contrib/debian', 'debian']) - -print("Done!") diff --git a/.travis/package_management/configure_rpm_lxc_environment.py b/.travis/package_management/configure_rpm_lxc_environment.py deleted file mode 100755 index 79d34608f..000000000 --- a/.travis/package_management/configure_rpm_lxc_environment.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -# -# -# Prepare the build environment within the container -# The script attaches to the running container and does the following: -# 1) Create the container -# 2) Start the container up -# 3) Create the builder user -# 4) Prepare the environment for RPM build -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. 
Katsoulakis - -import common -import os -import sys -import lxc - -if len(sys.argv) != 2: - print('You need to provide a container name to get things started') - sys.exit(1) -container_name=sys.argv[1] - -# Setup the container object -print("Defining container %s" % container_name) -container = lxc.Container(container_name) -if not container.defined: - raise Exception("Container %s not defined!" % container_name) - -# Start the container -if not container.start(): - raise Exception("Failed to start the container") - -if not container.running or not container.state == "RUNNING": - raise Exception('Container %s is not running, configuration process aborted ' % container_name) - -# Wait for connectivity -print("Waiting for container connectivity to start configuration sequence") -if not container.get_ips(timeout=30): - raise Exception("Timeout while waiting for container") - -# Run the required activities now -# Create the builder user -print("1. Adding user %s" % os.environ['BUILDER_NAME']) -common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']]) - -# Fetch package dependencies for the build -print("2.1 Preparing repo on LXC container") -common.prepare_repo(container) - -common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"]) -common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"]) - -# Exceptional cases, not available everywhere -# -print("2.2 Running uncommon dependencies and preparing LXC environment") -# Not on Centos-7 -if os.environ["BUILD_STRING"].count("el/7") <= 0: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter_acct-devel"]) - -# Not on Centos-6 -if os.environ["BUILD_STRING"].count("el/6") <= 0: - common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autoconf-archive"]) - -print("2.3 Installing common dependencies") -common.install_common_dependendencies(container) - -print("3. Setting up macros") -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "/bin/echo", "'%_topdir %(echo /home/" + os.environ['BUILDER_NAME'] + ")/rpmbuild' > /home/" + os.environ['BUILDER_NAME'] + "/.rpmmacros"]) - -print("4. 
Create rpmbuild directory") -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/BUILD"]) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/RPMS"]) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SOURCES"]) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SPECS"]) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SRPMS"]) -common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltrR", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild"]) - -# Download the source -rpm_friendly_version="" -dest_archive="" -download_url="" -spec_file="/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME'] -tag = None -rpm_friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION']) -tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version) - -print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION']) -dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version) - -common.prepare_version_source(dest_archive, rpm_friendly_version, tag=tag) - -# Extract the spec file in place -print("6. Extract spec file from the source") -common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) -common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file]) - -print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file)) -common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file) - -print('Done!') diff --git a/.travis/package_management/create_lxc_for_build.sh b/.travis/package_management/create_lxc_for_build.sh deleted file mode 100755 index d733687a8..000000000 --- a/.travis/package_management/create_lxc_for_build.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env bash -# -# This script generates an LXC container and starts it up -# Once the script completes successfully, a container has become available for usage -# The container image to be used and the container name to be set, are part of variables -# that must be present for the script to work -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) -# shellcheck disable=SC1091 -set -e - -source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1) - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup) -if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository" - echo "LXC Container creation aborted" - exit 1 -fi - -# Check for presence of mandatory environment variables -if [ -z "${BUILD_STRING}" ]; then - echo "No Distribution was defined. 
Make sure BUILD_STRING is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILDER_NAME}" ]; then - echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILD_DISTRO}" ]; then - echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script" - exit 1 -fi - -if [ -z "${BUILD_RELEASE}" ]; then - echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script" - exit 1 -fi - -if [ -z "${PACKAGE_TYPE}" ]; then - echo "No build release information defined. Make sure PACKAGE_TYPE is set on the environment before running this script" - exit 1 -fi - -# Detect architecture and load extra variables needed -detect_arch_from_commit - -echo "Creating LXC container ${BUILDER_NAME}/${BUILD_STRING}/${BUILD_ARCH}...." - -case "${BUILD_ARCH}" in -"all") - # i386 - echo "Creating LXC Container for i386.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "i386" --no-validate - - echo "Container(s) ready. Configuring container(s).." - .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" - - # amd64 - echo "Creating LXC Container for amd64.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "amd64" --no-validate - - echo "Container(s) ready. Configuring container(s).." - .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" - - # arm64 - echo "Creating LXC Container for arm64.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "arm64" --no-validate - - echo "Container(s) ready. Configuring container(s).." - .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" - ;; -"i386"|"amd64"|"arm64") - # amd64 or i386 - echo "Creating LXC Container for ${BUILD_ARCH}.." - export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}" - export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs" - lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "${BUILD_ARCH}" --no-validate - - echo "Container(s) ready. Configuring container(s).." - .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}" - ;; -*) - echo "Unknown BUILD_ARCH value '${BUILD_ARCH}' given, process failed" - exit 1 - ;; -esac - -echo "..LXC creation complete!" diff --git a/.travis/package_management/functions.sh b/.travis/package_management/functions.sh deleted file mode 100644 index 0c4425fa5..000000000 --- a/.travis/package_management/functions.sh +++ /dev/null @@ -1,33 +0,0 @@ -# no-shebang-needed-its-a-library -# -# Utility functions for packaging in travis CI -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. 
Katsoulakis (paul@netdata.cloud) -#shellcheck disable=SC2148 -set -e - -function detect_arch_from_commit { - case "${TRAVIS_COMMIT_MESSAGE}" in - "[Package amd64"*) - export BUILD_ARCH="amd64" - ;; - "[Package i386"*) - export BUILD_ARCH="i386" - ;; - "[Package ALL"*) - export BUILD_ARCH="all" - ;; - "[Package arm64"*) - export BUILD_ARCH="arm64" - ;; - - *) - echo "Unknown build architecture in '${TRAVIS_COMMIT_MESSAGE}'. No BUILD_ARCH can be provided" - exit 1 - ;; - esac - - echo "Detected build architecture ${BUILD_ARCH}" -} diff --git a/.travis/package_management/package_cloud_wrapper.sh b/.travis/package_management/package_cloud_wrapper.sh deleted file mode 100755 index 48a372d37..000000000 --- a/.travis/package_management/package_cloud_wrapper.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# -# This is a tool to help removal of packages from packagecloud.io -# It utilizes the package_cloud utility provided from packagecloud.io -# -# Depends on: -# 1) package cloud gem (detects absence and installs it) -# -# Requires: -# 1) PKG_CLOUD_TOKEN variable exported -# 2) To properly install package_cloud when not found, it requires: ruby gcc gcc-c++ ruby-devel -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) -#shellcheck disable=SC2068,SC2145 -set -e -PKG_CLOUD_CONFIG="$HOME/.package_cloud_configuration.cfg" - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup) -if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository" - echo "Docker build process aborted" - exit 1 -fi - -# Install dependency if not there -if ! command -v package_cloud > /dev/null 2>&1; then - echo "No package cloud gem found, installing" - gem install -V package_cloud || (echo "Package cloud installation failed. you might want to check if required dependencies are there (ruby gcc gcc-c++ ruby-devel)" && exit 1) -else - echo "Found package_cloud gem, continuing" -fi - -# Check for required token and prepare config -if [ -z "${PKG_CLOUD_TOKEN}" ]; then - echo "Please set PKG_CLOUD_TOKEN to be able to use ${0}" - exit 1 -fi -echo "{\"url\":\"https://packagecloud.io\",\"token\":\"${PKG_CLOUD_TOKEN}\"}" > "${PKG_CLOUD_CONFIG}" - -echo "Executing package_cloud with config ${PKG_CLOUD_CONFIG} and parameters $@" -package_cloud $@ --config="${PKG_CLOUD_CONFIG}" - -rm -rf "${PKG_CLOUD_CONFIG}" -echo "Done!" diff --git a/.travis/package_management/prepare_packages.sh b/.travis/package_management/prepare_packages.sh deleted file mode 100755 index 12ed07cc7..000000000 --- a/.travis/package_management/prepare_packages.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash -# -# Utility that gathers generated packages, -# puts them together in a local folder for deploy facility to pick up -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud) -#shellcheck disable=SC2068 -set -e - -# If we are not in netdata git repo, at the top level directory, fail -TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)") -CWD=$(git rev-parse --show-cdup) -if [ -n "$CWD" ] || [ ! 
"${TOP_LEVEL}" == "netdata" ]; then - echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository" - echo "Package preparation aborted" - exit 1 -fi - -export LXC_ROOT="/var/lib/lxc" - -# Go through the containers created for packaging and pick up all generated packages -CREATED_CONTAINERS=$(ls -A "${LXC_ROOT}") -for d in ${CREATED_CONTAINERS[@]}; do - echo "Picking up packaging contents from ${d}" - - # Pick up any RPMS from builder - RPM_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}/rpmbuild" - if [ -d "${RPM_BUILD_PATH}" ]; then - echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS" - - if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then - echo "Copying any RPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/RPMS" - [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}" - fi - - if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then - echo "Copying any SRPMS in '${RPM_BUILD_PATH}', copying over the following:" - ls -ltrR "${RPM_BUILD_PATH}/SRPMS" - [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}" - [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}" - fi - else - DEB_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}" - echo "Checking folder ${DEB_BUILD_PATH} for DEB packages" - if [ -d "${DEB_BUILD_PATH}" ]; then - cp "${DEB_BUILD_PATH}"/netdata*.ddeb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .ddeb files" - cp "${DEB_BUILD_PATH}"/netdata*.deb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .deb files" - cp "${DEB_BUILD_PATH}"/netdata*.buildinfo "${PACKAGES_DIRECTORY}" || echo "Could not copy any .buildinfo files" - cp "${DEB_BUILD_PATH}"/netdata*.changes "${PACKAGES_DIRECTORY}" || echo "Could not copy any .changes files" - else - echo "Folder ${DEB_BUILD_PATH} does not exist or not a directory, nothing to do for package preparation" - fi - fi -done - -chmod -R 777 "${PACKAGES_DIRECTORY}" -echo "Packaging contents ready to ship!" diff --git a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py deleted file mode 100755 index a0235a73d..000000000 --- a/.travis/package_management/trigger_deb_lxc_build.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python3 -# -# This script is responsible for running the RPM build on the running container -# -# Copyright: SPDX-License-Identifier: GPL-3.0-or-later -# -# Author : Pavlos Emm. Katsoulakis - -import common -import os -import sys -import lxc - -if len(sys.argv) != 2: - print('You need to provide a container name to get things started') - sys.exit(1) -container_name=sys.argv[1] - -# Load the container, break if its not there -print("Starting up container %s" % container_name) -container = lxc.Container(container_name) -if not container.defined: - raise Exception("Container %s does not exist!" 
diff --git a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py
deleted file mode 100755
index a0235a73d..000000000
--- a/.travis/package_management/trigger_deb_lxc_build.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python3
-#
-# This script is responsible for running the DEB build on the running container
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
-    print('You need to provide a container name to get things started')
-    sys.exit(1)
-container_name=sys.argv[1]
-
-# Load the container, bail out if it's not there
-print("Starting up container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
-    raise Exception("Container %s does not exist!" % container_name)
-
-# Check if the container is running, attempt to start it if it's not running
-if not container.running or not container.state == "RUNNING":
-    print('Container %s is not running, attempting to start it up' % container_name)
-
-    # Start the container
-    if not container.start():
-        raise Exception("Failed to start the container")
-
-    if not container.running or not container.state == "RUNNING":
-        raise Exception('Container %s is not running, configuration process aborted' % container_name)
-
-# Wait for connectivity
-if not container.get_ips(timeout=30):
-    raise Exception("Timeout while waiting for container")
-
-build_path = "/home/%s" % os.environ['BUILDER_NAME']
-
-print("Setting up EMAIL and DEBFULLNAME variables required by the build tools")
-os.environ["EMAIL"] = "bot@netdata.cloud"
-os.environ["DEBFULLNAME"] = "Netdata builder"
-
-# Run the build process on the container
-new_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
-print("Starting DEB build process for version %s" % new_version)
-
-netdata_tarball = "%s/netdata-%s.tar.gz" % (build_path, new_version)
-unpacked_netdata = netdata_tarball.replace(".tar.gz", "")
-
-print("Extracting tarball %s" % netdata_tarball)
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", netdata_tarball, "-C", build_path])
-
-print("Fixing changelog tags")
-changelog_in_host = "contrib/debian/changelog"
-common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_VERSION/%s-1/g' % os.environ["LATEST_RELEASE_VERSION"].replace("v", ""), changelog_in_host])
-common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_DATE/%s/g' % os.environ["LATEST_RELEASE_DATE"], changelog_in_host])
-
-print("Executing gbp dch command..")
-common.run_command_in_host(['gbp', 'dch', '--release', '--ignore-branch', '--spawn-editor=snapshot', '--since=%s' % os.environ["LATEST_RELEASE_VERSION"], '--new-version=%s' % new_version])
-
-print("Copying over changelog to the destination machine")
-common.run_command_in_host(['sudo', 'cp', 'debian/changelog', "%s/%s/netdata-%s/contrib/debian/" % (os.environ['LXC_CONTAINER_ROOT'], build_path, new_version)])
-
-print("Running debian build script, packaging changes since %s" % os.environ["LATEST_RELEASE_VERSION"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "%s/build.sh" % build_path, unpacked_netdata, new_version])
-
-print("Listing contents on build path")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltr", build_path])
-
-print('Done!')
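The "Fixing changelog tags" step above boils down to two in-place `sed` substitutions on `contrib/debian/changelog`, driven by pipeline environment variables. A shell equivalent of those two calls, with purely illustrative values for the variables:

```bash
# Illustrative values only; in CI these come from the Travis environment.
LATEST_RELEASE_VERSION="v1.16.1"
LATEST_RELEASE_DATE="Thu, 18 Jul 2019 12:00:00 +0000"

# Same substitutions the script issues through common.run_command_in_host();
# ${LATEST_RELEASE_VERSION#v} strips the leading "v" from the tag.
sed -i "s/PREVIOUS_PACKAGE_VERSION/${LATEST_RELEASE_VERSION#v}-1/g" contrib/debian/changelog
sed -i "s/PREVIOUS_PACKAGE_DATE/${LATEST_RELEASE_DATE}/g" contrib/debian/changelog
```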
diff --git a/.travis/package_management/trigger_rpm_lxc_build.py b/.travis/package_management/trigger_rpm_lxc_build.py
deleted file mode 100755
index f9e109c72..000000000
--- a/.travis/package_management/trigger_rpm_lxc_build.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-#
-# This script is responsible for running the RPM build on the running container
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
-    print('You need to provide a container name to get things started')
-    sys.exit(1)
-container_name=sys.argv[1]
-
-# Load the container, bail out if it's not there
-print("Starting up container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
-    raise Exception("Container %s does not exist!" % container_name)
-
-# Check if the container is running, attempt to start it if it's not running
-if not container.running or not container.state == "RUNNING":
-    print('Container %s is not running, attempting to start it up' % container_name)
-
-    # Start the container
-    if not container.start():
-        raise Exception("Failed to start the container")
-
-    if not container.running or not container.state == "RUNNING":
-        raise Exception('Container %s is not running, configuration process aborted' % container_name)
-
-# Wait for connectivity
-if not container.get_ips(timeout=30):
-    raise Exception("Timeout while waiting for container")
-
-print("Adding builder specific dependencies to the LXC container")
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-build"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-devel"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmlint"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "make"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "python"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "diffutils"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "patch"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmdevtools"])
-
-# Run the build process on the container
-print("Starting RPM build process")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "rpmbuild", "-ba", "--rebuild", "/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME']])
-
-print('Done!')
diff --git a/.travis/package_management/yank_stale_rpm.sh b/.travis/package_management/yank_stale_rpm.sh
deleted file mode 100755
index 3f7669712..000000000
--- a/.travis/package_management/yank_stale_rpm.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script is responsible for the removal of stale RPM/DEB files.
-# It runs in the pre-deploy step and removes the stale files
-# prior to the upload of the freshly built ones
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC2010,SC2068
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
-  echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
-  echo "Package yanking cancelled"
-  exit 1
-fi
-
-PACKAGES_DIR="$1"
-DISTRO="$2"
-PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep -e '\.rpm' -e '\.deb' -e '\.ddeb' )"
-
-if [ ! -d "${PACKAGES_DIR}" ] || [ -z "${PACKAGES_LIST}" ]; then
-  echo "Folder ${PACKAGES_DIR} does not seem to be a valid directory or is empty. No packages to check for yanking"
-  exit 1
-fi
-
-for pkg in ${PACKAGES_LIST[@]}; do
-  echo "Attempting yank on ${pkg}.."
-  .travis/package_management/package_cloud_wrapper.sh yank "${PACKAGING_USER}/${DEPLOY_REPO}/${DISTRO}" "${pkg}" || echo "Nothing to yank or error on ${pkg}"
-done
-
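The yank script above expects the packages directory and the target distro path as positional arguments, plus `PACKAGING_USER`, `DEPLOY_REPO` and `PKG_CLOUD_TOKEN` in the environment. A hedged invocation sketch; every value shown here is a placeholder, not the real pipeline configuration:

```bash
# All values below are invented for illustration only.
export PKG_CLOUD_TOKEN="xxxxxxxxxxxxxxxx"
export PACKAGING_USER="netdata"
export DEPLOY_REPO="netdata-edge"
.travis/package_management/yank_stale_rpm.sh "artifacts/packages" "el/7"
```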
diff --git a/.travis/run_install_with_dist_file.sh b/.travis/run_install_with_dist_file.sh
deleted file mode 100755
index ccad627cc..000000000
--- a/.travis/run_install_with_dist_file.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script evaluates the netdata installation using the sources produced by 'make dist'
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis