summaryrefslogtreecommitdiffstats
path: root/test/run-functional-tests
diff options
context:
space:
mode:
Diffstat (limited to 'test/run-functional-tests')
-rwxr-xr-x  test/run-functional-tests  551
1 files changed, 551 insertions, 0 deletions
diff --git a/test/run-functional-tests b/test/run-functional-tests
new file mode 100755
index 0000000..5c3bca7
--- /dev/null
+++ b/test/run-functional-tests
@@ -0,0 +1,551 @@
#!/bin/bash
# Functional-test driver: builds crmsh inside docker containers and runs
# the behave feature tests against a containerized cluster.

# Container image with the preinstalled HA stack (override via env).
DOCKER_IMAGE=${DOCKER_IMAGE:-"nyang23/haleap:15.5"}
# Absolute path of the crmsh.git checkout (parent of the test/ directory).
PROJECT_PATH=$(dirname "$(dirname "$(realpath "$0")")")
# Where the project tree is copied to inside each container.
PROJECT_INSIDE="/opt/crmsh"
DOCKER_SERVICE="docker.service"
COROSYNC_CONF="/etc/corosync/corosync.conf"
COROSYNC_AUTH="/etc/corosync/authkey"
HA_NETWORK_FIRST="ha_network_first"
HA_NETWORK_SECOND="ha_network_second"
# Two docker networks every node is attached to; the IPv6 subnets below
# are parallel to the network array by index.
declare -a HA_NETWORK_ARRAY
declare -a HA_NETWORK_V6_ARRAY
HA_NETWORK_ARRAY[0]=$HA_NETWORK_FIRST
HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
# Directory with the behave .feature files; cases matching the exclude
# pattern need special storage and are skipped by default.
BEHAVE_CASE_DIR="$(dirname "$0")/features/"
BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+
+read -r -d '' SSHD_CONFIG_AZURE << EOM
+PermitRootLogin no
+AuthorizedKeysFile .ssh/authorized_keys
+ChallengeResponseAuthentication no
+UsePAM yes
+X11Forwarding yes
+ClientAliveInterval 180
+Subsystem sftp /usr/lib/ssh/sftp-server
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL
+PasswordAuthentication no
+EOM
+
+read -r -d '' COROSYNC_CONF_TEMPLATE << EOM
+totem {
+ version: 2
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ interface {
+ ringnumber: 0
+ mcastport: 5405
+ ttl: 1
+ }
+
+ transport: udpu
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ token: 5000
+ join: 60
+ max_messages: 20
+ token_retransmits_before_loss_const: 10
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+nodelist {
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
+EOM
+
+
fatal() {
    # Print an error message and terminate the whole script.
    # "$*" is quoted so the message reaches error() as one unmangled
    # argument (unquoted it would be re-split and glob-expanded).
    error "$*"
    exit 1
}
+
+
error() {
    # Print an ERROR-prefixed message.
    # Sent to stderr so it stays visible even when the caller's stdout is
    # captured in a command substitution (e.g. case_arry=$(get_test_case_array)
    # would otherwise swallow the message fatal() prints through here).
    printf 'ERROR: %s\n' "$*" >&2
}
+
+
warning() {
    # Print a WARNING-prefixed message on stderr (diagnostics must not
    # pollute stdout, which several callers capture).
    printf 'WARNING: %s\n' "$*" >&2
}
+
+
info() {
    # Print an INFO-prefixed progress message on stdout.
    printf 'INFO: %s\n' "$*"
}
+
+
is_number() {
    # Succeed iff $1 is a positive integer (> 0).
    # test "$num" -eq "$num" fails for non-numeric input; test(1) complains
    # on stderr for such input, hence the 2>/dev/null.
    local num=$1   # was an unintended global before
    test -n "$num" && test "$num" -eq "$num" 2> /dev/null && test "$num" -gt 0 2> /dev/null
}
+
+
check_docker_env() {
    # Verify the local docker setup is usable. With the single argument
    # "cleanup", skip the check that the HA networks do not exist yet
    # (during cleanup they are expected to exist).
    local network

    # docker must be installed...
    if ! systemctl list-unit-files "$DOCKER_SERVICE" &> /dev/null; then
        fatal "$DOCKER_SERVICE is not available"
    fi
    # ...and the daemon must be running
    if ! systemctl is-active "$DOCKER_SERVICE" &> /dev/null; then
        fatal "$DOCKER_SERVICE is not active"
    fi
    # systemd cgroup driver is recommended for the systemd-based containers
    if ! docker info 2> /dev/null | grep -q "Cgroup Driver: systemd"; then
        warning "docker cgroup driver suggest to be \"systemd\""
    fi

    [ "$1" == "cleanup" ] && return
    # refuse to proceed when a previous run left the HA networks behind
    for network in "${HA_NETWORK_ARRAY[@]}"; do
        if docker network ls | grep -q "$network"; then
            fatal "HA specific network \"$network\" already exists"
        fi
    done
}
+
+
get_test_case_array() {
    # Print the runnable .feature file names (whitespace separated),
    # excluding the cases matched by BEHAVE_CASE_EXCLUDE.
    test -d "$BEHAVE_CASE_DIR" || fatal "Cannot find '$BEHAVE_CASE_DIR'"
    ls "$BEHAVE_CASE_DIR" | grep "\.feature" | grep -Ev "$BEHAVE_CASE_EXCLUDE"
}
+
+
echo_test_cases() {
    # Print an indexed table of all test cases; the index one past the
    # feature list is reserved for the original regression test suite.
    local case_arry index f desc
    case_arry=$(get_test_case_array)
    echo "Index|File Name|Description"
    index=1
    for f in $case_arry; do
        # the feature file's "Feature: ..." line is its description
        desc=$(awk -F: '/Feature/{print $2}' "$BEHAVE_CASE_DIR/$f")
        printf "%3s %-40s %-60s\n" "$index" "$f" "$desc"
        index=$((index+1))
    done
    printf "%3s %-40s %-60s\n" "$index" "regression test" "Original regression test"
}
+
+
usage_and_exit() {
    # Print the help text and exit with status 1.
    local prog
    prog=$(basename "$0")
    # NB: the heredoc deliberately expands $prog.
    cat <<END
Usage: $prog [OPTIONS]|[TESTCASE INDEX]
$prog is a tool for developers to setup the cluster in containers to run functional tests.
The container image is based on Tumbleweed with preinstalled packages of the cluster stack include pacemaker/corosync/crmsh and many others.
Users can make the code change under crmsh.git including test cases. This tool will pick up the code change and "make install" to all running containers.

OPTIONS:
  -h, --help  Show this help message and exit
  -l          List existing functional test cases and exit
  -n NUM      Only setup a cluster with NUM nodes(containers)
  -x          Don't config corosync on containers(with -n option)
  -d          Cleanup the cluster containers
  -u          Create normal users, and Azure like ssh environment
  -q          Create a qnetd node(with -n and -x option)

EXAMPLES:
To launch 2 nodes with the running cluster with the very basic corosync.conf
# crmsh.git/test/run-functional-tests -n 2

To launch 2 nodes without the cluster stack running to play with "crm cluster init/join"
# crmsh.git/test/run-functional-tests -n 2 -x

To launch 2 nodes without the cluster stack running, and a qnetd node(named 'qnetd-node')
# crmsh.git/test/run-functional-tests -n 2 -x -q

To list the existing test cases. Users could add his own new test cases.
# crmsh.git/test/run-functional-tests -l

To run a single or a number of functional test cases
# crmsh.git/test/run-functional-tests 1
# crmsh.git/test/run-functional-tests 1 2 3

To clean up the all containers which are generated by this tool
# crmsh.git/test/run-functional-tests -d
END
    exit 1
}
+
+
docker_exec() {
    # Run a shell command line inside the named container (tty allocated so
    # remote output is visible); returns the command's exit status.
    local name=$1
    local cmd=$2
    docker exec -t "$name" /bin/sh -c "$cmd"
}
+
set_sshd_config_like_in_azure() {
    # Overwrite sshd_config on the node with the Azure-like config
    # (key-only login) and restart sshd to apply it.
    local node_name=$1
    docker_exec "$node_name" "echo \"$SSHD_CONFIG_AZURE\" > /etc/ssh/sshd_config"
    docker_exec "$node_name" "systemctl restart sshd.service"
}
+
create_custom_user() {
    # Create a passwordless-sudo test user on the container whose name is
    # in the global $node_name (set by the caller, deploy_ha_node).
    local user_name=$1
    # NOTE(review): $2 (uid) is accepted but never passed to useradd, so
    # the uid is picked by useradd itself -- confirm whether -u was intended.
    local user_id=$2
    docker_exec $node_name "useradd -m -s /bin/bash ${user_name} 2>/dev/null"
    # grant NOPASSWD sudo by appending to /etc/sudoers
    docker_exec $node_name "chmod u+w /etc/sudoers"
    docker_exec $node_name "echo \"${user_name} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers"
    docker_exec $node_name "chmod u-w /etc/sudoers"
    docker_exec $node_name "echo 'export PATH=\$PATH:/usr/sbin/' >> ~${user_name}/.bashrc"
    # password "linux", typed twice for passwd's confirmation prompt
    docker_exec $node_name "echo -e \"linux\\nlinux\" | passwd ${user_name} 2>/dev/null"
    # reuse root's ssh keys so nodes can reach each other as this user
    docker_exec $node_name "cp -r /root/.ssh ~${user_name}/ && chown ${user_name}:haclient -R ~${user_name}/.ssh"
    info "Create user '$user_name' on $node_name"
}
+
create_alice_bob_carol() {
    # Custom users alice, bob and carol are as important as root and
    # should eventually ship inside the docker image; until then create
    # them here on every cluster node.
    local spec
    for spec in alice:1000 bob:1001 carol:1002; do
        create_custom_user "${spec%%:*}" "${spec##*:}"
    done
}
+
deploy_ha_node() {
    # Start one privileged container, attach it to both HA networks, and
    # (for cluster nodes, i.e. anything but "qnetd-node") build and
    # install crmsh from the local checkout inside it.
    node_name=$1
    docker_options="-d --name $node_name -h $node_name --privileged --shm-size 1g"
    make_cmd="cd $PROJECT_INSIDE;./autogen.sh && ./configure --prefix /usr && make install && make install-crmconfDATA prefix= && cp /usr/bin/crm /usr/sbin"

    info "Deploying \"$node_name\"..."
    docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
    for network in ${HA_NETWORK_ARRAY[@]};do
        docker network connect $network $node_name
    done

    if [ "$node_name" != "qnetd-node" ];then
        # cluster nodes must not run corosync-qnetd themselves
        rm_qnetd_cmd="rpm -q corosync-qnetd && rpm -e corosync-qnetd"
        docker_exec $node_name "$rm_qnetd_cmd" &> /dev/null
    fi
    # /run/nologin would block non-root logins on a freshly booted system
    docker_exec $node_name "rm -rf /run/nologin"
    docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"

    if [ "$node_name" != "qnetd-node" ];then
        # copy the working tree in and "make install" the current code
        docker cp $PROJECT_PATH $node_name:/opt/crmsh
        info "Building crmsh on \"$node_name\"..."
        docker_exec $node_name "$make_cmd" 1> /dev/null || \
            fatal "Building failed on $node_name!"
        docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
        docker_exec $node_name "chmod g+w -R /var/log/crmsh"
        create_alice_bob_carol
        if [ "$NORMAL_USER_FLAG" -eq 1 ];then
            set_sshd_config_like_in_azure $node_name
        fi
    else
        # the qnetd node only needs a single sudo-capable user, alice
        docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
        docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
        docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
        info "Create user 'alice' on $node_name"
        [ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
    fi
}
+
+
create_node() {
    # Pull the image, create the HA docker networks, then deploy all
    # requested nodes concurrently and wait for every build to finish.
    local index network node
    info "Loading docker image $DOCKER_IMAGE..."
    docker pull "$DOCKER_IMAGE" &> /dev/null

    for index in "${!HA_NETWORK_ARRAY[@]}";do
        network=${HA_NETWORK_ARRAY[$index]}
        info "Create ha specific docker network \"$network\"..."
        docker network create --ipv6 --subnet "${HA_NETWORK_V6_ARRAY[$index]}" "$network" &> /dev/null
    done

    # fan out the deployments, then barrier on all of them
    for node in "$@";do
        deploy_ha_node "$node" &
    done
    wait
}
+
+
config_cluster() {
    # Generate corosync.conf from the template (injecting a nodelist entry
    # per node), install it plus a fresh authkey on the first node, and
    # scp both to the remaining nodes.
    node_num=$#
    insert_str=""
    # NOTE(review): an unsubscripted $HA_NETWORK_ARRAY expands to element 0
    # only, so the IPv4 addresses come from the first HA network (ring0).
    container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)

    # build the node {} entries in reverse order so each sed "a"ppend after
    # the nodelist line leaves them in ascending nodeid order
    for i in $(seq $node_num -1 1);do
        # strip the /prefix from docker's CIDR-style address
        ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
        insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
    done
    corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
    # a 2-node cluster needs two_node: 1 to be quorate without a third vote
    if [ $node_num -eq 2 ];then
        corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
    fi

    info "Copy corosync.conf to $*"
    for node in $*;do
        if [ $node == $1 ];then
            docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
            docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
        else
            # wait until passwordless ssh from node1 to the peer works
            while :
            do
                docker_exec $1 "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
                sleep 1
            done
            docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
        fi
    done
}
+
+
start_cluster() {
    # Enable and start the cluster service on every given node; abort the
    # whole script on the first failure.
    local node
    for node in "$@";do
        if docker_exec "$node" "crm cluster enable && crm cluster start" 1> /dev/null;then
            info "Cluster service started on \"$node\""
        else
            fatal "Failed to start cluster service on \"$node\""
        fi
    done
}
+
+
container_already_exists() {
    # Die if a container named exactly $1 already exists (running or not).
    # Match the name column only: the previous grep over the whole
    # "docker ps -a" output also hit image names and name substrings
    # (e.g. "hanode1" matched "hanode10").
    if docker ps -a --format '{{.Names}}' | grep -qx -- "$1";then
        fatal "Container \"$1\" already exists"
    fi
}
+
+
setup_cluster() {
    # Create the requested nodes -- either "$1" is a node count (names are
    # generated as hanodeN) or the arguments are explicit node names --
    # then optionally configure and start corosync on them.
    local hanodes_arry=() i
    if is_number "$1";then
        for i in $(seq 1 "$1");do
            hanodes_arry+=("hanode$i")
        done
    else
        hanodes_arry=("$@")
    fi

    if [ $WITH_QNETD_NODE -eq 1 ];then
        create_node "${hanodes_arry[@]}" "qnetd-node"
    else
        create_node "${hanodes_arry[@]}"
    fi

    # with -x the user drives "crm cluster init/join" manually
    [ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
    config_cluster "${hanodes_arry[@]}"
    start_cluster "${hanodes_arry[@]}"
    # containers have no fencing devices
    docker_exec "hanode1" "crm configure property stonith-enabled=false"
}
+
+
cleanup_container() {
    # Stop and remove one container, discarding docker's output.
    local node=$1
    info "Cleanup container \"$node\"..."
    docker container stop "$node" &> /dev/null
    docker container rm "$node" &> /dev/null
}
+
+
cleanup_cluster() {
    # Remove every container attached to the HA networks, then the
    # networks themselves. Safe to call when nothing is left.
    local exist_network_array=() network node container_array
    for network in "${HA_NETWORK_ARRAY[@]}";do
        docker network ls | grep -q "$network" && exist_network_array+=("$network")
    done
    if [ ${#exist_network_array[@]} -eq 0 ];then
        info "Already cleaned up"
        return 0
    fi

    # deploy_ha_node connects every node to all HA networks, so inspecting
    # the first existing network is enough to enumerate the containers
    # (the old unsubscripted $exist_network_array meant element 0 anyway).
    container_array=($(docker network inspect "${exist_network_array[0]}" -f '{{range .Containers}}{{printf "%s " .Name}}{{end}}'))
    for node in "${container_array[@]}";do
        cleanup_container "$node" &
    done
    wait

    for network in "${exist_network_array[@]}";do
        info "Cleanup ha specific docker network \"$network\"..."
        docker network rm "$network" &> /dev/null
    done
}
+
+
adjust_test_case() {
    # Rewrite placeholders inside the feature file (already present inside
    # the container) with real container addresses:
    #   @<node>.ip.default / @<node>.ip.<N>   -> IPv4 of the node
    #   @<node>.ip6.default / @<node>.ip6.<N> -> global IPv6 of the node
    #   @vip.<N>                              -> VIP derived from the node's
    #                                            first-network address
    node_name=$1
    # collect the distinct placeholders used by the file; dos2unix and the
    # later tr -d "\r" strip the CRs that docker exec -t appends to output
    replace_arry=(`docker_exec $node_name "grep -o -E '@(hanode[0-9]+|qnetd-node)\.ip[6]?\.(default|[0-9])' $2|sort -u|dos2unix"`)
    for item in ${replace_arry[@]};do
        item_str=${item##@}
        node=`echo $item_str|cut -d "." -f 1`
        ip_version=`echo $item_str|cut -d "." -f 2|tr -d "\r"`
        # pick the matching field in docker's NetworkSettings template
        ip_search_str="IPAddress"
        if [ "$ip_version" == "ip6" ];then
            ip_search_str="GlobalIPv6Address"
        fi
        index=`echo $item_str|cut -d "." -f 3|tr -d "\r"`
        if [ "$index" == "default" ];then
            # "default": first network docker reports for the container
            ip=`docker container inspect $node -f "{{range .NetworkSettings.Networks}}{{printf \"%s \" .$ip_search_str}}{{end}}"|awk '{print $1}'|tr -d "\r"`
        else
            # numeric index: address on the index-th HA network
            ip=`docker container inspect $node -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[$index]}.$ip_search_str}}"|tr -d "\r"`
        fi
        item=`echo $item|tr -d "\r"`
        docker_exec $node_name "sed -i s/$item/$ip/g $2"
    done

    # @vip.N: reuse this node's subnet with host octet 123+N
    vip_replace_array=(`docker_exec $node_name "grep -o -E '@vip\.[0-9]' $2|sort -u|dos2unix"`)
    for item in ${vip_replace_array[@]};do
        index=`echo $item|cut -d "." -f 2|tr -d "\r"`
        suffix=$((123+index))
        ip=`docker container inspect $node_name -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[0]}.IPAddress}}"|tr -d "\r"`
        vip=`echo $ip|sed "s/\.[0-9][0-9]*$/\.$suffix/g"|tr -d "\r"`
        item=`echo $item|tr -d "\r"`
        docker_exec $node_name "sed -i s/$item/$vip/g $2"
    done
}
+
+
run_origin_regression_test() {
    # Run the original crmsh regression suite on a single-node cluster;
    # corosync is not configured for it. Returns the suite's exit status.
    CONFIG_COROSYNC_FLAG=0
    setup_cluster "hanode1"
    docker_exec "hanode1" "sh /usr/share/crmsh/tests/regression.sh"
}
+
+
prepare_coverage_env() {
    # Patch /usr/sbin/crm on each node so every crm invocation records
    # python coverage data: coverage starts right after the first line and
    # is stopped/saved via an atexit hook.
    # NOTE(review): docker exec is called directly here rather than through
    # docker_exec -- presumably to keep the nested single-quoting of the
    # sed program intact; confirm before refactoring.
    for node in $*; do
        docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
    done
}
+
+
fetch_coverage_report() {
    # Combine the coverage data on each node and copy the XML report out
    # under a unique name the codecov uploader can discover (coverage.*.xml).
    local unique_id node
    # 8-char random id; '+' and '/' are mapped to '-' and '_' so the id is
    # filename-safe. Declaration is split from assignment so the command's
    # status is not masked by "local".
    unique_id=$(dd if=/dev/urandom count=1 bs=6 2>/dev/null | base64 | tr '+/' '-_')
    for node in "$@"; do
        docker_exec "$node" "coverage combine; coverage xml -o /opt/coverage.xml"
        # see https://github.com/codecov/codecov-cli/blob/master/codecov_cli/services/upload/coverage_file_finder.py
        docker cp "$node":/opt/coverage.xml coverage."$unique_id"."$node".xml
    done
}
+
+
# Defaults; flipped by the command line options parsed below.
WITH_QNETD_NODE=0
NORMAL_USER_FLAG=0
CONFIG_COROSYNC_FLAG=1
SETUP_N_NODES_CLUSTER=0
# util-linux getopt: short options plus the long "--help" alias.
options=$(getopt -l "help" -o "hldxuqn:" -- "$@")
eval set -- "$options"
while true;do
case $1 in
-h|--help) usage_and_exit;;
-l)
    # list the test cases and stop
    echo_test_cases
    exit 0
    ;;
-d)
    # tear down all containers and networks, then stop
    check_docker_env cleanup
    cleanup_cluster
    exit $?
    ;;
-x)
    CONFIG_COROSYNC_FLAG=0
    shift
    ;;
-u)
    NORMAL_USER_FLAG=1
    shift
    ;;
-q)
    WITH_QNETD_NODE=1
    shift
    ;;
-n)
    check_docker_env
    shift
    is_number $1 || fatal "-n option need a number larger than 0"
    SETUP_N_NODES_CLUSTER=$1
    shift
    ;;
--)
    # end of options; remaining args are test case indexes
    shift
    break
    ;;
esac
done
+
# -n NUM given: just bring the cluster up and exit.
if [ $SETUP_N_NODES_CLUSTER -ge 1 ];then
    setup_cluster $SETUP_N_NODES_CLUSTER
    exit $?
fi

# No options and no test case indexes left: show help.
if [ "$#" -eq 0 ];then
    usage_and_exit
fi

# used by github action
# Translate case-name patterns into the index list echo_test_cases prints.
if [ "$1" == "_get_index_of" ];then
    shift
    pattern=""
    for item in $*;do
        pattern+="${item}|"
    done
    # ${pattern%%|} drops the trailing "|" before using it as an ERE
    echo_test_cases|grep -E "(${pattern%%|})(\.feature|\s+Original regression test)"|awk '{print $1}'|tr -s '\n' ' '
    exit 0
fi

# Validate every requested index before starting any containers.
for case_num in $*;do
    echo_test_cases|grep -E "\s+$case_num\s" &> /dev/null
    if [ "$?" -ne 0 ];then
        error "\"$case_num\" is an invalid index"
        echo_test_cases
        exit 1
    fi
done

# Run the requested cases one after another; each case gets a freshly
# provisioned cluster.
for case_num in $*;do
    # from the second case on, recycle the previous cluster first
    if [ "$case_num" -ne $1 ];then
        check_docker_env cleanup
        cleanup_cluster
        echo
    fi
    check_docker_env
    test_case_array=(`get_test_case_array`)
    # an index past the feature list selects the original regression test
    if [ $case_num -gt ${#test_case_array[*]} ];then
        run_origin_regression_test || exit 1
        continue
    fi
    case_file=$BEHAVE_CASE_DIR/${test_case_array[$((case_num-1))]}
    case_file_in_container="$PROJECT_INSIDE/test/features/`basename $case_file`"
    # the "Need nodes:" line in the feature file names the nodes to create
    node_arry=(`awk -F: '/Need nodes/{print $2}' $case_file`)
    CONFIG_COROSYNC_FLAG=0
    setup_cluster ${node_arry[@]}
    # replace @<node>.ip*/@vip.* placeholders with real container addresses
    adjust_test_case ${node_arry[0]} $case_file_in_container
    echo
    prepare_coverage_env "${node_arry[@]}"
    if [ "$NORMAL_USER_FLAG" -eq 0 ];then
        info "Running \"$case_file_in_container\" under 'root'..."
        docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
    else
        info "Running \"$case_file_in_container\" under normal user 'alice'..."
        docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
    fi
    fetch_coverage_report "${node_arry[@]}"
    echo
done