Diffstat (limited to 'src/pybind/mgr/dashboard/ci')
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh      39
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml          45
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh  59
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh          80
-rw-r--r--  src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py      179
5 files changed, 402 insertions, 0 deletions
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
new file mode 100755
index 000000000..1c2c4b3cd
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+set -x
+
+export PATH=/root/bin:$PATH
+mkdir /root/bin
+
+export CEPHADM_IMAGE='quay.ceph.io/ceph-ci/ceph:reef'
+
+CEPHADM="/root/bin/cephadm"
+
+/mnt/{{ ceph_dev_folder }}/src/cephadm/build.sh $CEPHADM
+mkdir -p /etc/ceph
+mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
+
+bootstrap_extra_options='--allow-fqdn-hostname --dashboard-password-noupdate'
+
+# The lines below are commented out. Uncomment them when extra options are
+# needed for the bootstrap.
+# bootstrap_extra_options_not_expanded=''
+# {% if expanded_cluster is not defined %}
+# bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}"
+# {% endif %}
+
+$CEPHADM bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --shared_ceph_folder /mnt/{{ ceph_dev_folder }} ${bootstrap_extra_options}
+
+fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
+cephadm_shell="$CEPHADM shell --fsid ${fsid} -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring"
+
+{% for number in range(1, nodes) %}
+ ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.10{{ number }}
+ {% if expanded_cluster is defined %}
+ ${cephadm_shell} ceph orch host add {{ prefix }}-node-0{{ number }}
+ {% endif %}
+{% endfor %}
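+
+# For illustration only (not executed): with the defaults from ceph_cluster.yml
+# (nodes: 4, prefix: ceph), each iteration of the loop above renders roughly to,
+# e.g. for number 1:
+#   ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@192.168.100.101
+#   ${cephadm_shell} ceph orch host add ceph-node-01   # only when expanded_cluster is defined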
+
+{% if expanded_cluster is defined %}
+ ${cephadm_shell} ceph orch apply osd --all-available-devices
+{% endif %}
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
new file mode 100755
index 000000000..a334fbad5
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
@@ -0,0 +1,45 @@
+parameters:
+ nodes: 4
+ node_ip_offset: 100
+ pool: ceph-dashboard
+ network: ceph-dashboard
+ gateway: 192.168.100.1
+ netmask: 255.255.255.0
+ prefix: ceph
+ numcpus: 1
+ memory: 2048
+ image: fedora36
+ notify: false
+ admin_password: password
+ disks:
+ - 15
+ - 5
+ - 5
+
+{% for number in range(0, nodes) %}
+{{ prefix }}-node-0{{ number }}:
+ image: {{ image }}
+ numcpus: {{ numcpus }}
+ memory: {{ memory }}
+ reserveip: true
+ reservedns: true
+ sharedkey: true
+ nets:
+ - name: {{ network }}
+ ip: 192.168.100.{{ node_ip_offset + number }}
+ gateway: {{ gateway }}
+ mask: {{ netmask }}
+ dns: {{ gateway }}
+ disks: {{ disks }}
+ pool: {{ pool }}
+ sharedfolders: [{{ ceph_dev_folder }}]
+ files:
+ - bootstrap-cluster.sh
+ cmds:
+ - dnf -y install python3 chrony lvm2 podman
+ - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
+ - setenforce 0
+ {% if number == 0 %}
+ - bash /root/bootstrap-cluster.sh
+ {% endif %}
+{% endfor %}
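+
+# Note: kcli renders this plan with the values under "parameters" as defaults;
+# start-cluster.sh supplies additional parameters (e.g. ceph_dev_folder,
+# expanded_cluster) via -P at plan creation time, roughly (example values,
+# not the exact invocation):
+#   kcli create plan -f ceph_cluster.yml -P ceph_dev_folder=/ceph -P expanded_cluster=true ceph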
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
new file mode 100755
index 000000000..a48f759f5
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+set -ex
+
+: ${CYPRESS_BASE_URL:=''}
+: ${CYPRESS_LOGIN_USER:='admin'}
+: ${CYPRESS_LOGIN_PWD:='password'}
+: ${CYPRESS_ARGS:=''}
+: ${DASHBOARD_PORT:='8443'}
+
+get_vm_ip () {
+ local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+ echo -n $ip
+}
+
+if [[ -n "${JENKINS_HOME}" || (-z "${CYPRESS_BASE_URL}" && -z "$(get_vm_ip ceph-node-00)") ]]; then
+ . "$(dirname $0)"/start-cluster.sh
+
+ CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
+fi
+
+export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
+
+cypress_run () {
+ local specs="$1"
+ local timeout="$2"
+ local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs},chromeWebSecurity=false"
+ if [[ -n "$timeout" ]]; then
+ override_config="${override_config},defaultCommandTimeout=${timeout}"
+ fi
+
+ rm -f cypress/reports/results-*.xml || true
+
+ npx --no-install cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
+}
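+
+# For reference: with specs="cypress/e2e/orchestrator/grafana/*.feature" and a
+# hypothetical timeout of 30000, the --config value above would expand roughly to:
+#   excludeSpecPattern=*.po.ts,retries=0,specPattern=cypress/e2e/orchestrator/grafana/*.feature,chromeWebSecurity=false,defaultCommandTimeout=30000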
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph config set mgr mgr/prometheus/exclude_perf_counters false"'
+
+# Wait until the prometheus daemon is running
+# before starting the e2e tests.
+
+PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
+while [[ $PROMETHEUS_RUNNING_COUNT -lt 1 ]]; do
+ PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
+done
+
+# The Grafana/Alertmanager/Prometheus API hosts default to the FQDN.
+# kcli cannot resolve that, so set the IPs manually.
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-alertmanager-api-host http://192.168.100.100:9093"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-prometheus-api-host http://192.168.100.100:9095"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-grafana-api-url https://192.168.100.100:3000"'
+kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch apply node-exporter --placement 'count:2'"'
+
+cypress_run '["cypress/e2e/orchestrator/workflow/*.feature","cypress/e2e/orchestrator/workflow/*-spec.ts"]'
+cypress_run "cypress/e2e/orchestrator/grafana/*.feature"
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
new file mode 100755
index 000000000..65cb78a45
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+set -eEx
+
+on_error() {
+ set +x
+ if [ "$1" != "0" ]; then
+ echo "ERROR $1 thrown on line $2"
+ echo
+ echo "Collecting info..."
+ echo
+ echo "Saving MGR logs:"
+ echo
+ mkdir -p ${CEPH_DEV_FOLDER}/logs
+ kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
+ for vm_id in {0..3}
+ do
+ local vm="ceph-node-0${vm_id}"
+ echo "Saving journalctl from VM ${vm}:"
+ echo
+ kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
+ echo "Saving container logs:"
+ echo
+ kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
+ done
+ echo "TEST FAILED."
+ fi
+}
+
+trap 'on_error $? $LINENO' ERR
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+ shift
+ case "$arg" in
+ "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
+ "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
+ esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+export NG_CLI_ANALYTICS=false
+if [[ -n "$JENKINS_HOME" ]]; then
+ npm cache clean --force
+fi
+npm ci
+FRONTEND_BUILD_OPTS='--configuration=production'
+if [[ -n "${DEV_MODE}" ]]; then
+ FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora36'}
+: ${VM_IMAGE_URL:='https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+ -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+ ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
+ sleep ${DASHBOARD_CHECK_INTERVAL}
+ kcli list vm
+ if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+ kcli ssh -u root -- ceph-node-00 'podman ps -a'
+ kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
+ fi
+ kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
diff --git a/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
new file mode 100644
index 000000000..d37337b40
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=F0401
+"""
+This script:
+* Scans through Angular HTML templates and extracts <cd-grafana> tags
+* Checks that every tag has a corresponding Grafana dashboard, matched by `uid`
+
+Usage:
+ python <script> <angular_app_dir> <grafana_dashboard_dir>
+
+e.g.
+ cd /ceph/src/pybind/mgr/dashboard
+ python ci/<script> frontend/src/app /ceph/monitoring/ceph-mixin/dashboards_out
+"""
+import argparse
+import codecs
+import copy
+import json
+import os
+from html.parser import HTMLParser
+
+
+class TemplateParser(HTMLParser):
+
+ def __init__(self, _file, search_tag):
+ super().__init__()
+ self.search_tag = search_tag
+ self.file = _file
+ self.parsed_data = []
+
+ def parse(self):
+ with codecs.open(self.file, encoding='UTF-8') as f:
+ self.feed(f.read())
+
+ def handle_starttag(self, tag, attrs):
+ if tag != self.search_tag:
+ return
+ tag_data = {
+ 'file': self.file,
+ 'attrs': dict(attrs),
+ 'line': self.getpos()[0]
+ }
+ self.parsed_data.append(tag_data)
+
+ def error(self, message):
+        error_msg = 'failed to parse file {} (@{}): {}'.\
+ format(self.file, self.getpos(), message)
+ exit(error_msg)
+
+
+def get_files(base_dir, file_ext):
+ result = []
+ for root, _, files in os.walk(base_dir):
+ for _file in files:
+ if _file.endswith('.{}'.format(file_ext)):
+ result.append(os.path.join(root, _file))
+ return result
+
+
+def get_tags(base_dir, tag='cd-grafana'):
+ templates = get_files(base_dir, 'html')
+ tags = []
+ for templ in templates:
+ parser = TemplateParser(templ, tag)
+ parser.parse()
+ if parser.parsed_data:
+ tags.extend(parser.parsed_data)
+ return tags
+
+
+def get_grafana_dashboards(base_dir):
+ json_files = get_files(base_dir, 'json')
+ dashboards = {}
+ for json_file in json_files:
+ try:
+ with open(json_file) as f:
+ dashboard_config = json.load(f)
+ uid = dashboard_config.get('uid')
+            # If it's not a Grafana dashboard, skip the checks.
+            # Fields in a dashboard:
+ # https://grafana.com/docs/grafana/latest/dashboards/json-model/#json-fields
+ expected_fields = [
+ 'id', 'uid', 'title', 'tags', 'style', 'timezone', 'editable',
+ 'hideControls', 'graphTooltip', 'panels', 'time', 'timepicker',
+ 'templating', 'annotations', 'refresh', 'schemaVersion', 'version', 'links',
+ ]
+ not_a_dashboard = False
+ for field in expected_fields:
+ if field not in dashboard_config:
+ not_a_dashboard = True
+ break
+ if not_a_dashboard:
+ continue
+
+ assert dashboard_config['id'] is None, \
+ "'id' not null: '{}'".format(dashboard_config['id'])
+
+ assert 'timezone' not in dashboard_config or dashboard_config['timezone'] == '', \
+ ("'timezone' field must not be set to anything but an empty string or be "
+ "omitted completely")
+
+ # Grafana dashboard checks
+ title = dashboard_config['title']
+ assert len(title) > 0, \
+ "Title not found in '{}'".format(json_file)
+ assert len(dashboard_config.get('links', [])) == 0, \
+ "Links found in '{}'".format(json_file)
+ if not uid:
+ continue
+ if uid in dashboards:
+ # duplicated uids
+ error_msg = 'Duplicated UID {} found, already defined in {}'.\
+ format(uid, dashboards[uid]['file'])
+ exit(error_msg)
+
+ dashboards[uid] = {
+ 'file': json_file,
+ 'title': title
+ }
+ except Exception as e:
+ print(f"Error in file {json_file}")
+ raise e
+ return dashboards
+
+
+def parse_args():
+    long_desc = ('Check that every <cd-grafana> component in the Angular'
+                 ' templates has a mapped Grafana dashboard.')
+ parser = argparse.ArgumentParser(description=long_desc)
+ parser.add_argument('angular_app_dir', type=str,
+ help='Angular app base directory')
+ parser.add_argument('grafana_dash_dir', type=str,
+                        help='Directory containing Grafana dashboard JSON files')
+ parser.add_argument('--verbose', action='store_true',
+ help='Display verbose mapping information.')
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ tags = get_tags(args.angular_app_dir)
+ grafana_dashboards = get_grafana_dashboards(args.grafana_dash_dir)
+ verbose = args.verbose
+
+ if not tags:
+        error_msg = 'Cannot find any cd-grafana component under {}'.\
+ format(args.angular_app_dir)
+ exit(error_msg)
+
+ if verbose:
+ print('Found mappings:')
+ no_dashboard_tags = []
+ for tag in tags:
+ uid = tag['attrs']['uid']
+ if uid not in grafana_dashboards:
+ no_dashboard_tags.append(copy.copy(tag))
+ continue
+ if verbose:
+ msg = '{} ({}:{}) \n\t-> {} ({})'.\
+ format(uid, tag['file'], tag['line'],
+ grafana_dashboards[uid]['title'],
+ grafana_dashboards[uid]['file'])
+ print(msg)
+
+ if no_dashboard_tags:
+ title = ('Checking Grafana dashboards UIDs: ERROR\n'
+ 'Components that have no mapped Grafana dashboards:\n')
+ lines = ('{} ({}:{})'.format(tag['attrs']['uid'],
+ tag['file'],
+ tag['line'])
+ for tag in no_dashboard_tags)
+ error_msg = title + '\n'.join(lines)
+ exit(error_msg)
+ else:
+ print('Checking Grafana dashboards UIDs: OK')
+
+
+if __name__ == '__main__':
+ main()