From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 .../mgr/dashboard/ci/cephadm/bootstrap-cluster.sh  |  39 +++++
 .../mgr/dashboard/ci/cephadm/ceph_cluster.yml      |  45 ++++++
 .../dashboard/ci/cephadm/run-cephadm-e2e-tests.sh  |  59 +++++++
 .../mgr/dashboard/ci/cephadm/start-cluster.sh      |  80 +++++++++
 .../mgr/dashboard/ci/check_grafana_dashboards.py   | 179 +++++++++++++++++++++
 5 files changed, 402 insertions(+)
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
 create mode 100644 src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py

diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
new file mode 100755
index 000000000..1c2c4b3cd
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+set -x
+
+export PATH=/root/bin:$PATH
+mkdir -p /root/bin
+
+export CEPHADM_IMAGE='quay.ceph.io/ceph-ci/ceph:reef'
+
+CEPHADM="/root/bin/cephadm"
+
+/mnt/{{ ceph_dev_folder }}/src/cephadm/build.sh $CEPHADM
+mkdir -p /etc/ceph
+mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
+
+bootstrap_extra_options='--allow-fqdn-hostname --dashboard-password-noupdate'
+
+# The following lines are commented out. Uncomment them when extra
+# options are needed for the bootstrap.
+# bootstrap_extra_options_not_expanded=''
+# {% if expanded_cluster is not defined %}
+#   bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}"
+# {% endif %}
+
+$CEPHADM bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --shared_ceph_folder /mnt/{{ ceph_dev_folder }} ${bootstrap_extra_options}
+
+fsid=$(grep fsid /etc/ceph/ceph.conf | awk '{ print $3}')
+cephadm_shell="$CEPHADM shell --fsid ${fsid} -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring"
+
+{% for number in range(1, nodes) %}
+  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.10{{ number }}
+  {% if expanded_cluster is defined %}
+    ${cephadm_shell} ceph orch host add {{ prefix }}-node-0{{ number }}
+  {% endif %}
+{% endfor %}
+
+{% if expanded_cluster is defined %}
+  ${cephadm_shell} ceph orch apply osd --all-available-devices
+{% endif %}
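
Note: bootstrap-cluster.sh is a Jinja2 template that kcli renders before copying it
to the first VM. As a rough sketch only (not part of the patch), with the defaults
from ceph_cluster.yml below (nodes: 4, prefix: ceph) and with expanded_cluster
defined, the loops at the end of the script render to plain bash along these lines:

    # copy the cluster SSH key to the remaining three nodes and enroll them
    ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.101
    ${cephadm_shell} ceph orch host add ceph-node-01
    ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.102
    ${cephadm_shell} ceph orch host add ceph-node-02
    ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.103
    ${cephadm_shell} ceph orch host add ceph-node-03
    # create OSDs on every unused disk across the cluster
    ${cephadm_shell} ceph orch apply osd --all-available-devices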
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
new file mode 100755
index 000000000..a334fbad5
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
@@ -0,0 +1,45 @@
+parameters:
+ nodes: 4
+ node_ip_offset: 100
+ pool: ceph-dashboard
+ network: ceph-dashboard
+ gateway: 192.168.100.1
+ netmask: 255.255.255.0
+ prefix: ceph
+ numcpus: 1
+ memory: 2048
+ image: fedora36
+ notify: false
+ admin_password: password
+ disks:
+ - 15
+ - 5
+ - 5
+
+{% for number in range(0, nodes) %}
+{{ prefix }}-node-0{{ number }}:
+ image: {{ image }}
+ numcpus: {{ numcpus }}
+ memory: {{ memory }}
+ reserveip: true
+ reservedns: true
+ sharedkey: true
+ nets:
+  - name: {{ network }}
+    ip: 192.168.100.{{ node_ip_offset + number }}
+    gateway: {{ gateway }}
+    mask: {{ netmask }}
+    dns: {{ gateway }}
+ disks: {{ disks }}
+ pool: {{ pool }}
+ sharedfolders: [{{ ceph_dev_folder }}]
+ files:
+ - bootstrap-cluster.sh
+ cmds:
+ - dnf -y install python3 chrony lvm2 podman
+ - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
+ - setenforce 0
+ {% if number == 0 %}
+ - bash /root/bootstrap-cluster.sh
+ {% endif %}
+{% endfor %}

diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
new file mode 100755
index 000000000..a48f759f5
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+set -ex
+
+: ${CYPRESS_BASE_URL:=''}
+: ${CYPRESS_LOGIN_USER:='admin'}
+: ${CYPRESS_LOGIN_PWD:='password'}
+: ${CYPRESS_ARGS:=''}
+: ${DASHBOARD_PORT:='8443'}
+
+get_vm_ip () {
+  local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+  echo -n $ip
+}
+
+if [[ -n "${JENKINS_HOME}" || (-z "${CYPRESS_BASE_URL}" && -z "$(get_vm_ip ceph-node-00)") ]]; then
+  . "$(dirname $0)"/start-cluster.sh
+
+  CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
+fi
+
+export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
+
+cypress_run () {
+  local specs="$1"
+  local timeout="$2"
+  local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs},chromeWebSecurity=false"
+  if [[ -n "$timeout" ]]; then
+    override_config="${override_config},defaultCommandTimeout=${timeout}"
+  fi
+
+  rm -f cypress/reports/results-*.xml || true
+
+  npx --no-install cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
+}
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph config set mgr mgr/prometheus/exclude_perf_counters false"'
+
+# Check that the Prometheus daemon is running
+# before starting the e2e tests.
+PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
+while [[ $PROMETHEUS_RUNNING_COUNT -lt 1 ]]; do
+  PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
+done
+
+# The Grafana API address is set to the FQDN by default, which kcli
+# cannot resolve, so set the IP addresses manually.
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-alertmanager-api-host http://192.168.100.100:9093"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-prometheus-api-host http://192.168.100.100:9095"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-grafana-api-url https://192.168.100.100:3000"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch apply node-exporter --placement 'count:2'"'
+
+cypress_run ["cypress/e2e/orchestrator/workflow/*.feature","cypress/e2e/orchestrator/workflow/*-spec.ts"]
+cypress_run "cypress/e2e/orchestrator/grafana/*.feature"
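
Note: cypress_run above only assembles arguments for a single npx call. As a rough
sketch (assuming CYPRESS_ARGS is empty and no timeout argument is given), the final
grafana invocation expands to:

    # the command produced by: cypress_run "cypress/e2e/orchestrator/grafana/*.feature"
    npx --no-install cypress run --browser chrome --headless \
        --config "excludeSpecPattern=*.po.ts,retries=0,specPattern=cypress/e2e/orchestrator/grafana/*.feature,chromeWebSecurity=false"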
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
new file mode 100755
index 000000000..65cb78a45
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+set -eEx
+
+on_error() {
+  set +x
+  if [ "$1" != "0" ]; then
+    echo "ERROR $1 thrown on line $2"
+    echo
+    echo "Collecting info..."
+    echo
+    echo "Saving MGR logs:"
+    echo
+    mkdir -p ${CEPH_DEV_FOLDER}/logs
+    kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
+    for vm_id in {0..3}
+    do
+      local vm="ceph-node-0${vm_id}"
+      echo "Saving journalctl from VM ${vm}:"
+      echo
+      kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
+      echo "Saving container logs:"
+      echo
+      kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
+    done
+    echo "TEST FAILED."
+  fi
+}
+
+trap 'on_error $? $LINENO' ERR
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+  shift
+  case "$arg" in
+    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
+    "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
+  esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build the dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+export NG_CLI_ANALYTICS=false
+if [[ -n "$JENKINS_HOME" ]]; then
+  npm cache clean --force
+fi
+npm ci
+FRONTEND_BUILD_OPTS='--configuration=production'
+if [[ -n "${DEV_MODE}" ]]; then
+  FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora36'}
+: ${VM_IMAGE_URL:='https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+    ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
+  sleep ${DASHBOARD_CHECK_INTERVAL}
+  kcli list vm
+  if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+    kcli ssh -u root -- ceph-node-00 'podman ps -a'
+    kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
+  fi
+  kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done

diff --git a/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
new file mode 100644
index 000000000..d37337b40
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=F0401
+"""
+This script does:
+* Scan through Angular html templates and extract <cd-grafana> tags
+* Check if every tag has a corresponding Grafana dashboard by `uid`
+
+Usage:
+    python