summaryrefslogtreecommitdiffstats
path: root/test/features/bootstrap_bugs.feature
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 06:48:59 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 06:48:59 +0000
commitd835b2cae8abc71958b69362162e6a70c3d7ef63 (patch)
tree81052e3d2ce3e1bcda085f73d925e9d6257dec15 /test/features/bootstrap_bugs.feature
parentInitial commit. (diff)
downloadcrmsh-d835b2cae8abc71958b69362162e6a70c3d7ef63.tar.xz
crmsh-d835b2cae8abc71958b69362162e6a70c3d7ef63.zip
Adding upstream version 4.6.0.upstream/4.6.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/features/bootstrap_bugs.feature')
-rw-r--r--test/features/bootstrap_bugs.feature251
1 files changed, 251 insertions, 0 deletions
diff --git a/test/features/bootstrap_bugs.feature b/test/features/bootstrap_bugs.feature
new file mode 100644
index 0000000..e6a2d6e
--- /dev/null
+++ b/test/features/bootstrap_bugs.feature
@@ -0,0 +1,251 @@
+@bootstrap
+Feature: Regression test for bootstrap bugs
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: Set placement-strategy value as "default"(bsc#1129462)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+ When Run "crm configure get_property placement-strategy" on "hanode1"
+ Then Got output "default"
+
+ @clean
+ Scenario: Empty value not allowed for option(bsc#1141976)
+ When Try "crm -c ' '"
+ Then Except "ERROR: Empty value not allowed for dest "cib""
+ When Try "crm cluster init --name ' '"
+ Then Except "ERROR: cluster.init: Empty value not allowed for dest "cluster_name""
+ When Try "crm cluster join -c ' '"
+ Then Except "ERROR: cluster.join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster remove -c ' '"
+ Then Except "ERROR: cluster.remove: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init -a ' '"
+ Then Except "ERROR: cluster.geo_init: Empty value not allowed for dest "arbitrator""
+ When Try "crm cluster geo_join -c ' '"
+ Then Except "ERROR: cluster.geo_join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init_arbitrator -c ' '"
+ Then Except "ERROR: cluster.geo_init_arbitrator: Empty value not allowed for dest "cluster_node""
+
+ @clean
+ Scenario: Setup cluster with crossed network(udpu only)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth0 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "stopped" on "hanode2"
+ And Except "Cannot see peer node "hanode1", please check the communication IP" in stderr
+ When Run "crm cluster join -c hanode1 -i eth0 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Remove correspond nodelist in corosync.conf while remove(bsc#1165644)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" in stdout
+ #And Service "hawk.service" is "started" on "hanode2"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Online nodes are "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # verify bsc#1175708
+ #And Service "hawk.service" is "stopped" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" not in stdout
+
+ @clean
+ Scenario: Multi nodes join in parallel(bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2,hanode3"
+ Then Cluster service is "started" on "hanode2"
+ And Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Show cluster status on "hanode1"
+ And File "/etc/corosync/corosync.conf" was synced in cluster
+
+ @clean
+ Scenario: Multi nodes join in parallel timed out(bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ # Try to simulate the join process hanging on hanode2 or hanode2 died
+ # Just leave the lock directory unremoved
+ When Run "mkdir /run/.crmsh_lock_directory" on "hanode1"
+ When Try "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (hanode1:/run/.crmsh_lock_directory)"
+ When Run "rm -rf /run/.crmsh_lock_directory" on "hanode1"
+
+ @clean
+ Scenario: Change host name in /etc/hosts as alias(bsc#1183654)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "echo '@hanode1.ip.0 HANODE1'|sudo tee -a /etc/hosts" on "hanode1"
+ When Run "echo '@hanode2.ip.0 HANODE2'|sudo tee -a /etc/hosts" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c HANODE1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster remove HANODE2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+
+ @clean
+ Scenario: Stop service quickly(bsc#1203601)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start --all;sudo crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "systemctl start corosync" on "hanode1"
+ Then Service "corosync" is "started" on "hanode1"
+ When Run "crm cluster stop" on "hanode1"
+ Then Service "corosync" is "stopped" on "hanode1"
+
+ @clean
+ Scenario: Can't stop all nodes' cluster service when local node's service is down(bsc#1213889)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Wait for DC
+ And Run "crm cluster stop" on "hanode1"
+ And Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: crm cluster join default behavior change in ssh key handling (bsc#1210693)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "rm -rf /home/alice/.ssh" on "hanode1"
+ When Run "rm -rf /home/alice/.ssh" on "hanode2"
+ When Run "su - alice -c "sudo crm cluster init -y"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "su - alice -c "sudo crm cluster join -c hanode1 -y"" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Passwordless for root, not for sudoer(bsc#1209193)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "useradd -m -s /bin/bash xin" on "hanode1"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin" on "hanode2"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "su xin -c "sudo crm cluster run 'touch /tmp/1209193'"" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Missing public key
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Check user shell for hacluster between "hanode1 hanode2"
+ Then Check passwordless for hacluster between "hanode1 hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Skip upgrade when preconditions are not satisfied
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{.bak,}" on "hanode1"
+ And Run "mv /root/.ssh{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ And Run "rm -rf /root/.ssh && mv /root/.ssh{.bak,}" OK on "hanode1"
+
+ # skip non-root as behave_agent is not able to run commands interactively with non-root sudoer
+ @skip_non_root
+ @clean
+  Scenario: Owner and permission of file authorized_keys (bsc#1217279)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # in a newly created cluster
+ When Run "crm cluster init -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ And Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys exists
+ When Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode1"
+ And Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys does not exist
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout