Diffstat
-rw-r--r--  test/README.regression  154
-rw-r--r--  test/bugs-test.txt  11
-rwxr-xr-x  test/cib-tests.sh  90
-rw-r--r--  test/cibtests/001.exp.xml  20
-rw-r--r--  test/cibtests/001.input  6
-rw-r--r--  test/cibtests/002.exp.xml  26
-rw-r--r--  test/cibtests/002.input  8
-rw-r--r--  test/cibtests/003.exp.xml  27
-rw-r--r--  test/cibtests/003.input  11
-rw-r--r--  test/cibtests/004.exp.xml  27
-rw-r--r--  test/cibtests/004.input  11
-rw-r--r--  test/cibtests/shadow.base  10
-rw-r--r--  test/crm-interface  89
-rw-r--r--  test/defaults  2
-rw-r--r--  test/descriptions  19
-rwxr-xr-x  test/evaltest.sh  113
-rw-r--r--  test/features/bootstrap_bugs.feature  251
-rw-r--r--  test/features/bootstrap_init_join_remove.feature  205
-rw-r--r--  test/features/bootstrap_options.feature  165
-rw-r--r--  test/features/bootstrap_sbd_delay.feature  286
-rw-r--r--  test/features/bootstrap_sbd_normal.feature  272
-rw-r--r--  test/features/cluster_api.feature  143
-rw-r--r--  test/features/configure_bugs.feature  38
-rw-r--r--  test/features/constraints_bugs.feature  24
-rw-r--r--  test/features/coveragerc  4
-rw-r--r--  test/features/crm_report_bugs.feature  164
-rw-r--r--  test/features/crm_report_normal.feature  109
-rw-r--r--  test/features/environment.py  53
-rw-r--r--  test/features/geo_setup.feature  29
-rw-r--r--  test/features/healthcheck.feature  37
-rw-r--r--  test/features/ocfs2.feature  61
-rw-r--r--  test/features/qdevice_options.feature  50
-rw-r--r--  test/features/qdevice_setup_remove.feature  173
-rw-r--r--  test/features/qdevice_usercase.feature  87
-rw-r--r--  test/features/qdevice_validate.feature  161
-rw-r--r--  test/features/resource_failcount.feature  61
-rw-r--r--  test/features/resource_set.feature  154
-rw-r--r--  test/features/ssh_agent.feature  86
-rw-r--r--  test/features/steps/__init__.py  0
-rwxr-xr-x  test/features/steps/behave_agent.py  134
-rw-r--r--  test/features/steps/const.py  353
-rw-r--r--  test/features/steps/step_implementation.py  575
-rw-r--r--  test/features/steps/utils.py  177
-rw-r--r--  test/features/user_access.feature  114
-rw-r--r--  test/history-test.tar.bz2  bin 0 -> 706600 bytes
-rwxr-xr-x  test/list-undocumented-commands.py  29
-rwxr-xr-x  test/profile-history.sh  22
-rwxr-xr-x  test/regression.sh  199
-rwxr-xr-x  test/run-functional-tests  551
-rw-r--r--  test/testcases/acl  60
-rw-r--r--  test/testcases/acl.excl  1
-rw-r--r--  test/testcases/acl.exp  94
-rw-r--r--  test/testcases/basicset  18
-rw-r--r--  test/testcases/bugs  79
-rw-r--r--  test/testcases/bugs.exp  215
-rw-r--r--  test/testcases/bundle  20
-rw-r--r--  test/testcases/bundle.exp  57
-rw-r--r--  test/testcases/commit  39
-rw-r--r--  test/testcases/commit.exp  90
-rw-r--r--  test/testcases/common.excl  26
-rwxr-xr-x  test/testcases/common.filter  9
-rw-r--r--  test/testcases/confbasic  91
-rw-r--r--  test/testcases/confbasic-xml  72
-rw-r--r--  test/testcases/confbasic-xml.exp  206
-rwxr-xr-x  test/testcases/confbasic-xml.filter  2
-rw-r--r--  test/testcases/confbasic.exp  199
-rw-r--r--  test/testcases/delete  64
-rw-r--r--  test/testcases/delete.exp  194
-rw-r--r--  test/testcases/edit  95
-rw-r--r--  test/testcases/edit.excl  1
-rw-r--r--  test/testcases/edit.exp  437
-rw-r--r--  test/testcases/file  14
-rw-r--r--  test/testcases/file.exp  77
-rw-r--r--  test/testcases/history  42
-rw-r--r--  test/testcases/history.excl  3
-rw-r--r--  test/testcases/history.exp  600
-rwxr-xr-x  test/testcases/history.post  3
-rwxr-xr-x  test/testcases/history.pre  3
-rw-r--r--  test/testcases/newfeatures  44
-rw-r--r--  test/testcases/newfeatures.exp  81
-rw-r--r--  test/testcases/node  14
-rw-r--r--  test/testcases/node.exp  204
-rw-r--r--  test/testcases/options  23
-rw-r--r--  test/testcases/options.exp  64
-rw-r--r--  test/testcases/ra  7
-rw-r--r--  test/testcases/ra.exp  150
-rwxr-xr-x  test/testcases/ra.filter  17
-rw-r--r--  test/testcases/resource  84
-rw-r--r--  test/testcases/resource.exp  1450
-rw-r--r--  test/testcases/rset  21
-rw-r--r--  test/testcases/rset-xml  19
-rw-r--r--  test/testcases/rset-xml.exp  53
-rw-r--r--  test/testcases/rset.exp  66
-rw-r--r--  test/testcases/scripts  14
-rw-r--r--  test/testcases/scripts.exp  305
-rwxr-xr-x  test/testcases/scripts.filter  4
-rw-r--r--  test/testcases/shadow  10
-rw-r--r--  test/testcases/shadow.exp  24
-rwxr-xr-x  test/testcases/xmlonly.sh  5
-rw-r--r--  test/unittests/__init__.py  64
-rw-r--r--  test/unittests/bug-862577_corosync.conf  51
-rw-r--r--  test/unittests/corosync.conf.1  81
-rw-r--r--  test/unittests/corosync.conf.2  58
-rw-r--r--  test/unittests/corosync.conf.3  68
-rw-r--r--  test/unittests/pacemaker.log  923
-rw-r--r--  test/unittests/pacemaker.log.2  3
-rw-r--r--  test/unittests/pacemaker_unicode.log  30
-rw-r--r--  test/unittests/schemas/acls-1.1.rng  66
-rw-r--r--  test/unittests/schemas/acls-1.2.rng  66
-rw-r--r--  test/unittests/schemas/constraints-1.0.rng  180
-rw-r--r--  test/unittests/schemas/constraints-1.1.rng  246
-rw-r--r--  test/unittests/schemas/constraints-1.2.rng  219
-rw-r--r--  test/unittests/schemas/fencing.rng  29
-rw-r--r--  test/unittests/schemas/nvset.rng  35
-rw-r--r--  test/unittests/schemas/pacemaker-1.0.rng  121
-rw-r--r--  test/unittests/schemas/pacemaker-1.1.rng  161
-rw-r--r--  test/unittests/schemas/pacemaker-1.2.rng  146
-rw-r--r--  test/unittests/schemas/resources-1.0.rng  177
-rw-r--r--  test/unittests/schemas/resources-1.1.rng  225
-rw-r--r--  test/unittests/schemas/resources-1.2.rng  225
-rw-r--r--  test/unittests/schemas/rule.rng  137
-rw-r--r--  test/unittests/schemas/score.rng  18
-rw-r--r--  test/unittests/schemas/versions.rng  24
-rw-r--r--  test/unittests/scripts/inc1/main.yml  22
-rw-r--r--  test/unittests/scripts/inc2/main.yml  26
-rw-r--r--  test/unittests/scripts/legacy/main.yml  52
-rw-r--r--  test/unittests/scripts/templates/apache.xml  36
-rw-r--r--  test/unittests/scripts/templates/virtual-ip.xml  62
-rw-r--r--  test/unittests/scripts/unified/main.yml  26
-rw-r--r--  test/unittests/scripts/v2/main.yml  46
-rw-r--r--  test/unittests/scripts/vip/main.yml  28
-rw-r--r--  test/unittests/scripts/vipinc/main.yml  14
-rw-r--r--  test/unittests/scripts/workflows/10-webserver.xml  50
-rw-r--r--  test/unittests/test.conf  12
-rw-r--r--  test/unittests/test_bootstrap.py  1905
-rw-r--r--  test/unittests/test_bugs.py  893
-rw-r--r--  test/unittests/test_cib.py  32
-rw-r--r--  test/unittests/test_cliformat.py  324
-rw-r--r--  test/unittests/test_corosync.py  488
-rw-r--r--  test/unittests/test_crashtest_check.py  790
-rw-r--r--  test/unittests/test_crashtest_main.py  215
-rw-r--r--  test/unittests/test_crashtest_task.py  777
-rw-r--r--  test/unittests/test_crashtest_utils.py  540
-rw-r--r--  test/unittests/test_gv.py  36
-rw-r--r--  test/unittests/test_handles.py  166
-rw-r--r--  test/unittests/test_lock.py  271
-rw-r--r--  test/unittests/test_objset.py  40
-rw-r--r--  test/unittests/test_ocfs2.py  465
-rw-r--r--  test/unittests/test_parallax.py  104
-rw-r--r--  test/unittests/test_parse.py  749
-rw-r--r--  test/unittests/test_prun.py  157
-rw-r--r--  test/unittests/test_qdevice.py  1031
-rw-r--r--  test/unittests/test_ratrace.py  131
-rw-r--r--  test/unittests/test_report_collect.py  588
-rw-r--r--  test/unittests/test_report_core.py  551
-rw-r--r--  test/unittests/test_report_utils.py  862
-rw-r--r--  test/unittests/test_sbd.py  894
-rw-r--r--  test/unittests/test_scripts.py  914
-rw-r--r--  test/unittests/test_service_manager.py  84
-rw-r--r--  test/unittests/test_sh.py  189
-rw-r--r--  test/unittests/test_time.py  24
-rw-r--r--  test/unittests/test_ui_cluster.py  173
-rw-r--r--  test/unittests/test_upgradeuitl.py  54
-rw-r--r--  test/unittests/test_utils.py  1514
-rw-r--r--  test/unittests/test_watchdog.py  311
-rw-r--r--  test/unittests/test_xmlutil.py  61
-rwxr-xr-x  test/update-expected-output.sh  9
-rw-r--r--  test_container/Dockerfile  28
-rw-r--r--  test_container/behave-agent.socket  9
-rw-r--r--  test_container/behave-agent@.service  9
-rwxr-xr-x  test_container/behave_agent.py  131
171 files changed, 30107 insertions, 0 deletions
diff --git a/test/README.regression b/test/README.regression
new file mode 100644
index 0000000..839ce59
--- /dev/null
+++ b/test/README.regression
@@ -0,0 +1,154 @@
+CRM shell regression tests
+
+* WARNING * WARNING * WARNING * WARNING * WARNING * WARNING *
+*
+* evaltest.sh uses eval to an extent you don't really want to
+* know about. Beware. Beware twice. Any input from the testcases
+* directory is considered to be trusted. So, think twice before
+* devising your tests lest you kill your precious data. Got it?
+* Good.
+*
+* Furthermore, we are deliberately small on testing the user
+* input and no one should try to predict what is to happen on
+* random input from the testcases.
+*
+* WARNING * WARNING * WARNING * WARNING * WARNING * WARNING *
+
+Manifest
+
+ regression.sh: the top level program
+    evaltest.sh: the test engine
+
+ crm-interface: interface to crm
+ descriptions: describe what we are about to do
+ defaults: the default settings for test commands
+
+ testcases/: here are the testcases and filters
+ crmtestout/: here goes the output
+
+All volatile data lives in the testcases/ directory.
+
+NB: You should never ever need to edit regression.sh and
+evaltest.sh. If you really have to, please talk to me and I will
+try to fix it so that you do not have to.
+
+Please write new test cases. The more the merrier :)
+
+Usage
+
+The usage is:
+
+ ./regression.sh ["prepare"] ["set:"<setname>|<testcase>]
+
+Test cases are collected in test sets. The default test set is
+basicset and running regression.sh without arguments will do all
+tests from that set.
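+
+For example, to run the default set, a named set, or a single
+testcase respectively:
+
+    ./regression.sh
+    ./regression.sh set:basicset
+    ./regression.sh confbasic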
+
+To show progress, for each test a '.' is printed. Once all tests
+have been evaluated, the output is checked against the expect
+file. If successful, "PASS" is printed, otherwise "FAIL".
+
+Specifying "prepare" will make regression.sh create expect
+output files for the given set of tests or testcase.
+
+To support the crm ra set of commands, the script may start and
+stop lrmd and stonithd if they are not already running.
+
+The following files may be generated:
+
+ output/<testcase>.out: the output of the testcase
+ output/regression.out: the output of regression.sh
+ output/crm.out: the output of crm tools/lrmd/stonithd etc
+
+On success output from testcases is removed and regression.out is
+empty.
+
+Driving the test cases yourself
+
+evaltest.sh accepts input from stdin, evaluates it immediately,
+and prints results to stdout/stderr. One can perhaps get a better
+feeling of what's actually going on by running it interactively.
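+
+For instance, a single special operation can be piped in directly:
+
+    echo "%ext date" | ./evaltest.sh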
+
+Test cases
+
+Tests are mainly written in the crm shell language, extended with a
+few simple regression test directives (the special operations
+starting with '%', and the session/show/showxml commands).
+
+Special operations
+
+There are special operations with which it is possible to change
+environment and do other useful things. All special ops start
+with the '%' sign and may be followed by additional parameters.
+
+%setenv
+ change the environment variable; see defaults for the
+ set of global variables and resetvars() in evaltest.sh
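+
+    for example, to dump an object's XML after each single
+    command (showobj is one of the variables evaltest.sh
+    consults):
+
+        %setenv showobj=p0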
+
+%stop
+ skip the rest of the tests
+
+%extcheck
+ feed the output of the next test case to the specified
+ external program/filter; the program should either reside in
+ testcases/ or be in the PATH, i.e.
+
+ %extcheck cat
+
+ simulates a null op :)
+
+ see testcases/metadata for some examples
+
+%ext
+ run an external command provided in the rest of the line; for
+ example:
+
+ %ext date
+
+ would print the current time (not very useful for regression
+ testing).
+
+%repeat num
+ repeat the next test num times
+ there are several variables which are substituted in the test
+ lines, so that we can simulate a for loop:
+
+ s/%t/$test_cnt/g
+ s/%l/$line/g
+ s/%j/$job_cnt/g
+ s/%i/$repeat_cnt/g
+
+ for example, to add 10 resources:
+
+ %repeat 10
+ configure primitive p-%i ocf:pacemaker:Dummy
+
+Filters and other auxiliary files
+
+Some output is necessarily very volatile, such as time stamps.
+It is possible to specify a filter for each testcase to get rid
+of superfluous information. A filter is a filter in the UNIX
+sense: it takes input from stdin and prints results to stdout.
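+
+A filter can be as simple as a sed one-liner; for instance, a
+hypothetical filter to neutralize times of day could be:
+
+    #!/bin/sh
+    sed 's/[0-9][0-9]:[0-9][0-9]:[0-9][0-9]/HH:MM:SS/g'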
+
+There is a common filter called very inventively
+testcases/common.filter which is applied to all test cases.
+
+Excl files are a list of extended regular expressions fed to
+egrep(1). That way one can filter out lines which are not
+interesting. Again, the one applied to all is
+testcases/common.excl.
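+
+For example, an excl file consisting of the single pattern
+
+    ^Last updated:
+
+would filter out all lines starting with "Last updated:".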
+
+A test may need an arbitrary script executed before or after the
+test itself in order to ascertain some state. The two scripts
+have extensions .pre and .post respectively. Their output is sent
+to /dev/null and the exit status ignored.
+
+Finally, the daemon log files may be filtered using log_filter.
+
+The full collection of auxiliary files follows:
+
+ <TEST>.filter
+ <TEST>.excl
+ <TEST>.log_filter
+ <TEST>.pre
+ <TEST>.post
diff --git a/test/bugs-test.txt b/test/bugs-test.txt
new file mode 100644
index 0000000..f33e78f
--- /dev/null
+++ b/test/bugs-test.txt
@@ -0,0 +1,11 @@
+node node1
+primitive st stonith:null params hostlist=node1
+op_defaults timeout=60s
+group g1 gr1 gr2
+group g2 gr3
+group g3 gr4
+primitive gr1 Dummy
+primitive gr2 Dummy
+primitive gr3 Dummy
+primitive gr4 Dummy
+location loc1 g1 rule 200: #uname eq node1
diff --git a/test/cib-tests.sh b/test/cib-tests.sh
new file mode 100755
index 0000000..4df8062
--- /dev/null
+++ b/test/cib-tests.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# Copyright (C) 2009 Lars Marowsky-Bree <lmb@suse.de>
+# See COPYING for license information.
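+#
+# Usage: ./cib-tests.sh [BASEDIR]
+#
+# BASEDIR defaults to $PWD/cibtests.  Every BASEDIR/*.input file is fed
+# to crm against a fresh copy of shadow.base, and the resulting CIB is
+# compared with the matching *.exp.xml file; with AUTOCREATE=1, missing
+# expected files are created from the current output.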
+
+BASE=${1:-`pwd`}/cibtests
+AUTOCREATE=1
+
+logt() {
+ local msg="$1"
+ echo $(date) "$msg" >>$LOGF
+ echo "$msg"
+}
+
+difft() {
+ crm_diff -V -u -o $1 -n $2
+}
+
+run() {
+ local cmd="$1"
+ local erc="$2"
+ local msg="$3"
+ local rc
+ local out
+
+ echo $(date) "$1" >>$LOGF
+ CIB_file=$CIB_file $1 >>$LOGF 2>&1 ; rc=$?
+ echo $(date) "Returned: $rc (expected $erc)" >>$LOGF
+ if [ $erc != "I" ]; then
+ if [ $rc -ne $erc ]; then
+ logt "$msg: FAILED ($erc != $rc)"
+ cat $LOGF
+ return 1
+ fi
+ fi
+ echo "$msg: ok"
+ return 0
+}
+
+runt() {
+ local T="$1"
+ local CIBE="$BASE/$(basename $T .input).exp.xml"
+ cp $BASE/shadow.base $CIB_file
+ run "crm" 0 "Running testcase: $T" <$T
+
+ # strip <cib> attributes from CIB_file
+ echo "<cib>" > $CIB_file.$$
+ tail -n +2 $CIB_file >> $CIB_file.$$
+ mv $CIB_file.$$ $CIB_file
+
+ local rc
+ if [ ! -e $CIBE ]; then
+ if [ "$AUTOCREATE" = "1" ]; then
+ logt "Creating new expected output for $T."
+ cp $CIB_file $CIBE
+ return 0
+ else
+ logt "$T: No expected output."
+ return 0
+ fi
+ fi
+
+ if ! crm_diff -u -o $CIBE -n $CIB_file >/dev/null 2>&1 ; then
+ logt "$T: XML: $CIBE does not match $CIB_file"
+ difft $CIBE $CIB_file
+ return 1
+ fi
+ return 0
+}
+
+LOGF=$(mktemp)
+export PATH=/usr/sbin:$PATH
+
+export CIB_file=$BASE/shadow.test
+
+failed=0
+for T in $BASE/*.input ; do
+ runt $T
+ failed=$(($? + $failed))
+done
+
+if [ $failed -gt 0 ]; then
+ logt "$failed tests failed!"
+ echo "Log:" $LOGF "CIB:" $CIB_file
+ exit 1
+fi
+
+logt "All tests passed!"
+#rm $LOGF $CIB_file
+exit 0
+
diff --git a/test/cibtests/001.exp.xml b/test/cibtests/001.exp.xml
new file mode 100644
index 0000000..c76e9d1
--- /dev/null
+++ b/test/cibtests/001.exp.xml
@@ -0,0 +1,20 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <primitive id="rsc_dummy" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" interval="30" id="rsc_dummy-monitor-30"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/001.input b/test/cibtests/001.input
new file mode 100644
index 0000000..8449a44
--- /dev/null
+++ b/test/cibtests/001.input
@@ -0,0 +1,6 @@
+configure
+property stonith-enabled=false
+primitive rsc_dummy ocf:heartbeat:Dummy
+monitor rsc_dummy 30
+commit
+quit
diff --git a/test/cibtests/002.exp.xml b/test/cibtests/002.exp.xml
new file mode 100644
index 0000000..13c017a
--- /dev/null
+++ b/test/cibtests/002.exp.xml
@@ -0,0 +1,26 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="1" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/002.input b/test/cibtests/002.input
new file mode 100644
index 0000000..7fd9acd
--- /dev/null
+++ b/test/cibtests/002.input
@@ -0,0 +1,8 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=1
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+quit
diff --git a/test/cibtests/003.exp.xml b/test/cibtests/003.exp.xml
new file mode 100644
index 0000000..70356af
--- /dev/null
+++ b/test/cibtests/003.exp.xml
@@ -0,0 +1,27 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ <nvpair id="testfs-clone-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="2" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/003.input b/test/cibtests/003.input
new file mode 100644
index 0000000..171f1cd
--- /dev/null
+++ b/test/cibtests/003.input
@@ -0,0 +1,11 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=2
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+up
+resource stop testfs-clone
+quit
+
diff --git a/test/cibtests/004.exp.xml b/test/cibtests/004.exp.xml
new file mode 100644
index 0000000..2d4c618
--- /dev/null
+++ b/test/cibtests/004.exp.xml
@@ -0,0 +1,27 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ <nvpair id="testfs-clone-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="hello" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/004.input b/test/cibtests/004.input
new file mode 100644
index 0000000..86839bc
--- /dev/null
+++ b/test/cibtests/004.input
@@ -0,0 +1,11 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=hello
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+up
+resource start testfs-clone
+quit
+
diff --git a/test/cibtests/shadow.base b/test/cibtests/shadow.base
new file mode 100644
index 0000000..a4b376d
--- /dev/null
+++ b/test/cibtests/shadow.base
@@ -0,0 +1,10 @@
+<cib crm_feature_set="3.0.9" validate-with="pacemaker-2.0" epoch="59" num_updates="0" admin_epoch="0" cib-last-written="Tue Sep 2 12:08:39 2014">
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/crm-interface b/test/crm-interface
new file mode 100644
index 0000000..b825dab
--- /dev/null
+++ b/test/crm-interface
@@ -0,0 +1,89 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
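+# name of the shadow CIB used by all regression test sessions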
+CIB=__crmsh_regtest
+
+filter_epoch() {
+ sed '/^<cib /s/ epoch="[0-9]*"/ epoch="1"/'
+}
+filter_date() {
+ sed '/^<cib /s/cib-last-written=".*"/cib-last-written="Sun Apr 12 21:37:48 2009"/'
+}
+filter_cib() {
+ sed -n '/^<?xml/,/^<\/cib>/p' | filter_date | filter_epoch
+}
+
+crm_setup() {
+ $CRM_NO_REG options reset
+ $CRM_NO_REG options check-frequency on-verify
+ $CRM_NO_REG options check-mode relaxed
+ $CRM_NO_REG cib delete $CIB >/dev/null 2>&1
+}
+
+crm_mksample() {
+ $CRM_NO_REG cib new $CIB empty >/dev/null 2>&1
+ $CRM_NO_REG -c $CIB<<EOF
+configure
+node node1
+primitive p0 ocf:pacemaker:Dummy
+primitive p1 ocf:pacemaker:Dummy
+primitive p2 ocf:heartbeat:Delay \
+ params startdelay=2 mondelay=2 stopdelay=2
+primitive p3 ocf:pacemaker:Dummy
+primitive st stonith:null params hostlist=node1
+clone c1 p1
+clone m1 p2 meta promotable=true
+op_defaults timeout=60s
+commit
+up
+EOF
+}
+crm_show() {
+ $CRM -c $CIB<<EOF
+configure
+_regtest on
+erase
+erase nodes
+`cat`
+show
+commit
+EOF
+}
+crm_showxml() {
+ $CRM -c $CIB<<EOF | filter_cib
+configure
+_regtest on
+erase
+erase nodes
+`cat`
+show xml
+commit
+EOF
+}
+crm_session() {
+ $CRM -c $CIB <<EOF
+`cat`
+EOF
+}
+crm_filesession() {
+ local _file=`mktemp`
+ $CRM_NO_REG -c $CIB<<EOF
+configure
+delete node1
+EOF
+ $CRM -c $CIB configure save xml $_file
+ CIB_file=$_file $CRM <<EOF
+`cat`
+EOF
+ rm -f $_file
+}
+crm_single() {
+ $CRM -c $CIB $*
+}
+crm_showobj() {
+ $CRM -c $CIB<<EOF | filter_date | filter_epoch
+configure
+_regtest on
+show xml $1
+EOF
+}
diff --git a/test/defaults b/test/defaults
new file mode 100644
index 0000000..50a7a6a
--- /dev/null
+++ b/test/defaults
@@ -0,0 +1,2 @@
+# defaults
+dflt_args=""
diff --git a/test/descriptions b/test/descriptions
new file mode 100644
index 0000000..694a528
--- /dev/null
+++ b/test/descriptions
@@ -0,0 +1,19 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+lead=".TRY"
+describe_show() {
+ echo $lead $*
+}
+describe_showxml() {
+ : echo $lead $*
+}
+describe_session() {
+ echo $lead $*
+}
+describe_filesession() {
+ echo $lead $*
+}
+describe_single() {
+ echo $lead $*
+}
diff --git a/test/evaltest.sh b/test/evaltest.sh
new file mode 100755
index 0000000..1dd6394
--- /dev/null
+++ b/test/evaltest.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Copyright (C) 2007 Dejan Muhamedagic <dejan@suse.de>
+# See COPYING for license information.
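+#
+# Reads test directives from stdin and evaluates them one by one; see
+# README.regression for the input format.  Running "evaltest.sh prof"
+# passes "-X regtest.profile" to crm to enable profiling.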
+
+: ${TESTDIR:=testcases}
+: ${CRM:=crm}
+CRM_NO_REG="$CRM"
+CRM="$CRM -R"
+export PYTHONUNBUFFERED=1
+export CRMSH_REGRESSION_TEST=1
+
+if [ "$1" = prof ]; then
+ CRM="$CRM -X regtest.profile"
+fi
+
+. ./defaults
+. ./crm-interface
+. ./descriptions
+
+resetvars() {
+ unset args
+ unset extcheck
+}
+
+#
+# special operations squad
+#
+specopt_setenv() {
+ eval $rest
+}
+specopt_ext() {
+ eval $rest
+}
+specopt_extcheck() {
+ extcheck="$rest"
+ set $extcheck
+ which "$1" >/dev/null 2>&1 || # a program in the PATH
+ extcheck="$TESTDIR/$extcheck" # or our script
+}
+specopt_repeat() {
+ repeat_limit=$rest
+}
+specopt() {
+ cmd=$(echo $cmd | sed 's/%//') # strip leading '%'
+ echo ".$(echo "$cmd" | tr "[:lower:]" "[:upper:]") $rest" # show what we got
+ "specopt_$cmd" # do what they asked for
+}
+
+#
+# substitute variables in the test line
+#
+substvars() {
+ sed "
+ s/%t/$test_cnt/g
+ s/%l/$line/g
+ s/%i/$repeat_cnt/g
+ "
+}
+
+dotest_session() {
+ echo -n "." >&3
+ test_cnt=$(($test_cnt+1))
+ "describe_$cmd" "$*" # show what we are about to do
+ "crm_$cmd" | # and execute the command
+ { [ "$extcheck" ] && $extcheck || cat;}
+}
+dotest_single() {
+ echo -n "." >&3
+ test_cnt=$(($test_cnt+1))
+ describe_single "$*" # show what we are about to do
+ crm_single "$*" | # and execute the command
+ { [ "$extcheck" ] && $extcheck || cat;}
+ if [ "$showobj" ]; then
+ crm_showobj $showobj
+ fi
+}
+runtest_session() {
+ while read line; do
+ if [ "$line" = . ]; then
+ break
+ fi
+ echo "$line"
+ done | dotest_session $*
+}
+runtest_single() {
+ while [ $repeat_cnt -le $repeat_limit ]; do
+ dotest_single "$*"
+ resetvars # unset all variables
+ repeat_cnt=$(($repeat_cnt+1))
+ done
+ repeat_limit=1 repeat_cnt=1
+}
+
+#
+# run the tests
+#
+repeat_limit=1 repeat_cnt=1
+line=1
+test_cnt=1
+
+crm_setup
+crm_mksample
+while read cmd rest; do
+ case "$cmd" in
+ "") : empty ;;
+ "#"*) : a comment ;;
+ "%stop") break ;;
+ "%"*) specopt ;;
+ show|showxml|session|filesession) runtest_session $rest ;;
+ *) runtest_single $cmd $rest ;;
+ esac
+ line=$(($line+1))
+done
diff --git a/test/features/bootstrap_bugs.feature b/test/features/bootstrap_bugs.feature
new file mode 100644
index 0000000..e6a2d6e
--- /dev/null
+++ b/test/features/bootstrap_bugs.feature
@@ -0,0 +1,251 @@
+@bootstrap
+Feature: Regression test for bootstrap bugs
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+  Scenario: Set placement-strategy value as "default" (bsc#1129462)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+ When Run "crm configure get_property placement-strategy" on "hanode1"
+ Then Got output "default"
+
+ @clean
+  Scenario: Empty value not allowed for option (bsc#1141976)
+ When Try "crm -c ' '"
+ Then Except "ERROR: Empty value not allowed for dest "cib""
+ When Try "crm cluster init --name ' '"
+ Then Except "ERROR: cluster.init: Empty value not allowed for dest "cluster_name""
+ When Try "crm cluster join -c ' '"
+ Then Except "ERROR: cluster.join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster remove -c ' '"
+ Then Except "ERROR: cluster.remove: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init -a ' '"
+ Then Except "ERROR: cluster.geo_init: Empty value not allowed for dest "arbitrator""
+ When Try "crm cluster geo_join -c ' '"
+ Then Except "ERROR: cluster.geo_join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init_arbitrator -c ' '"
+ Then Except "ERROR: cluster.geo_init_arbitrator: Empty value not allowed for dest "cluster_node""
+
+ @clean
+  Scenario: Setup cluster with crossed network (udpu only)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth0 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "stopped" on "hanode2"
+ And Except "Cannot see peer node "hanode1", please check the communication IP" in stderr
+ When Run "crm cluster join -c hanode1 -i eth0 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @clean
+  Scenario: Remove corresponding nodelist entry in corosync.conf on node removal (bsc#1165644)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" in stdout
+ #And Service "hawk.service" is "started" on "hanode2"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Online nodes are "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # verify bsc#1175708
+ #And Service "hawk.service" is "stopped" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" not in stdout
+
+ @clean
+  Scenario: Multiple nodes join in parallel (bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2,hanode3"
+ Then Cluster service is "started" on "hanode2"
+ And Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Show cluster status on "hanode1"
+ And File "/etc/corosync/corosync.conf" was synced in cluster
+
+ @clean
+  Scenario: Multiple nodes join in parallel, timed out (bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+    # Simulate the join process hanging on hanode2 (or hanode2 having died)
+    # by leaving the lock directory in place
+ When Run "mkdir /run/.crmsh_lock_directory" on "hanode1"
+ When Try "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (hanode1:/run/.crmsh_lock_directory)"
+ When Run "rm -rf /run/.crmsh_lock_directory" on "hanode1"
+
+ @clean
+  Scenario: Change host name in /etc/hosts as alias (bsc#1183654)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "echo '@hanode1.ip.0 HANODE1'|sudo tee -a /etc/hosts" on "hanode1"
+ When Run "echo '@hanode2.ip.0 HANODE2'|sudo tee -a /etc/hosts" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c HANODE1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster remove HANODE2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+
+ @clean
+  Scenario: Stop service quickly (bsc#1203601)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start --all;sudo crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "systemctl start corosync" on "hanode1"
+ Then Service "corosync" is "started" on "hanode1"
+ When Run "crm cluster stop" on "hanode1"
+ Then Service "corosync" is "stopped" on "hanode1"
+
+ @clean
+  Scenario: Can't stop all nodes' cluster service when local node's service is down (bsc#1213889)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Wait for DC
+ And Run "crm cluster stop" on "hanode1"
+ And Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: crm cluster join default behavior change in ssh key handling (bsc#1210693)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "rm -rf /home/alice/.ssh" on "hanode1"
+ When Run "rm -rf /home/alice/.ssh" on "hanode2"
+ When Run "su - alice -c "sudo crm cluster init -y"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "su - alice -c "sudo crm cluster join -c hanode1 -y"" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @skip_non_root
+ @clean
+  Scenario: Passwordless for root, not for sudoer (bsc#1209193)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "useradd -m -s /bin/bash xin" on "hanode1"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin" on "hanode2"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "su xin -c "sudo crm cluster run 'touch /tmp/1209193'"" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Missing public key
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Check user shell for hacluster between "hanode1 hanode2"
+ Then Check passwordless for hacluster between "hanode1 hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Skip upgrade when preconditions are not satisfied
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{.bak,}" on "hanode1"
+ And Run "mv /root/.ssh{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ And Run "rm -rf /root/.ssh && mv /root/.ssh{.bak,}" OK on "hanode1"
+
+ # skip non-root as behave_agent is not able to run commands interactively with non-root sudoer
+ @skip_non_root
+ @clean
+  Scenario: Owner and permission of file authorized_keys (bsc#1217279)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # in a newly created cluster
+ When Run "crm cluster init -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ And Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys exists
+ When Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode1"
+ And Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys does not exist
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
diff --git a/test/features/bootstrap_init_join_remove.feature b/test/features/bootstrap_init_join_remove.feature
new file mode 100644
index 0000000..ed04525
--- /dev/null
+++ b/test/features/bootstrap_init_join_remove.feature
@@ -0,0 +1,205 @@
+@bootstrap
+Feature: crmsh bootstrap process - init, join and remove
+
+ Test crmsh bootstrap init/join/remove process
+ Need nodes: hanode1 hanode2 hanode3
+
+ Background: Setup a two nodes cluster
+ Given Nodes ["hanode1", "hanode2", "hanode3"] are cleaned up
+ And Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ Scenario: Init cluster service on node "hanode1", and join on node "hanode2"
+
+  Scenario: Support --all or a specific node to manage cluster services and nodes
+ When Run "crm node standby --all" on "hanode1"
+ Then Node "hanode1" is standby
+ And Node "hanode2" is standby
+ When Run "crm node online --all" on "hanode1"
+ Then Node "hanode1" is online
+ And Node "hanode2" is online
+ When Wait for DC
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start --all" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ When Wait for DC
+ When Run "crm cluster stop hanode2" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start hanode2" on "hanode1"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster disable hanode2" on "hanode1"
+ Then Cluster service is "disabled" on "hanode2"
+ When Run "crm cluster enable hanode2" on "hanode1"
+ Then Cluster service is "enabled" on "hanode2"
+ When Run "crm cluster restart --all" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ Scenario: Remove peer node "hanode2"
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode2"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode2"
+ Then File "/etc/corosync/authkey" exists on "hanode2"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode2"
+ Then File "/etc/pacemaker/authkey" exists on "hanode2"
+ Then Directory "/var/lib/csync2/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode2"
+ Then Directory "/var/lib/corosync/" not empty on "hanode2"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+ And Show cluster status on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode2"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode2"
+ Then File "/etc/corosync/authkey" not exist on "hanode2"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode2"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode2"
+ Then Directory "/var/lib/csync2/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
+ Then Directory "/var/lib/corosync/" is empty on "hanode2"
+
+ Scenario: Remove local node "hanode1"
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode1"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode1"
+ Then File "/etc/corosync/authkey" exists on "hanode1"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode1"
+ Then File "/etc/pacemaker/authkey" exists on "hanode1"
+ Then Directory "/var/lib/csync2/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode1"
+ Then Directory "/var/lib/corosync/" not empty on "hanode1"
+ When Run "crm cluster remove hanode1 -y --force" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode1"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode1"
+ Then File "/etc/corosync/authkey" not exist on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode1"
+ Then Directory "/var/lib/csync2/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode1"
+ Then Directory "/var/lib/corosync/" is empty on "hanode1"
+
+ Scenario: Remove peer node "hanode2" with `crm -F node delete`
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode2"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode2"
+ Then File "/etc/corosync/authkey" exists on "hanode2"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode2"
+ Then File "/etc/pacemaker/authkey" exists on "hanode2"
+ Then Directory "/var/lib/csync2/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode2"
+ Then Directory "/var/lib/corosync/" not empty on "hanode2"
+ When Run "crm -F cluster remove hanode2" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+ And Show cluster status on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode2"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode2"
+ Then File "/etc/corosync/authkey" not exist on "hanode2"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode2"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode2"
+ Then Directory "/var/lib/csync2/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
+ Then Directory "/var/lib/corosync/" is empty on "hanode2"
+ When Run "crm cluster remove hanode1 -y --force" on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+
+ Scenario: Remove local node "hanode1" with `crm -F node delete`
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode1"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode1"
+ Then File "/etc/corosync/authkey" exists on "hanode1"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode1"
+ Then File "/etc/pacemaker/authkey" exists on "hanode1"
+ Then Directory "/var/lib/csync2/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode1"
+ Then Directory "/var/lib/corosync/" not empty on "hanode1"
+ When Run "crm -F node delete hanode1" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode1"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode1"
+ Then File "/etc/corosync/authkey" not exist on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode1"
+ Then Directory "/var/lib/csync2/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode1"
+ Then Directory "/var/lib/corosync/" is empty on "hanode1"
+
+ Scenario: Check hacluster's passwordless configuration on 2 nodes
+ Then Check user shell for hacluster between "hanode1 hanode2"
+ Then Check passwordless for hacluster between "hanode1 hanode2"
+
+ Scenario: Check hacluster's passwordless configuration in old cluster, 2 nodes
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Check passwordless for hacluster between "hanode1 hanode2"
+
+ Scenario: Check hacluster's passwordless configuration on 3 nodes
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Check user shell for hacluster between "hanode1 hanode2 hanode3"
+ And Check passwordless for hacluster between "hanode1 hanode2 hanode3"
+
+ Scenario: Check hacluster's passwordless configuration in old cluster, 3 nodes
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Check passwordless for hacluster between "hanode1 hanode2 hanode3"
+
+ Scenario: Check hacluster's user shell
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode3"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode3"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode3"
+ And Run "crm status" on "hanode1"
+ Then Check user shell for hacluster between "hanode1 hanode2 hanode3"
+ Then Check passwordless for hacluster between "hanode1 hanode2 hanode3"
diff --git a/test/features/bootstrap_options.feature b/test/features/bootstrap_options.feature
new file mode 100644
index 0000000..5ccc052
--- /dev/null
+++ b/test/features/bootstrap_options.feature
@@ -0,0 +1,165 @@
+@bootstrap
+Feature: crmsh bootstrap process - options
+
+ Test crmsh bootstrap options:
+ "--node": Additional nodes to add to the created cluster
+ "-i": Bind to IP address on interface IF
+ "-M": Configure corosync with second heartbeat line
+ "-n": Set the name of the configured cluster
+ "-A": Configure IP address as an administration virtual IP
+ "-u": Configure corosync to communicate over unicast
+ "-U": Configure corosync to communicate over multicast
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: Check help output
+ When Run "crm -h" on "hanode1"
+ Then Output is the same with expected "crm" help output
+ When Run "crm cluster init -h" on "hanode1"
+ Then Output is the same with expected "crm cluster init" help output
+ When Run "crm cluster join -h" on "hanode1"
+ Then Output is the same with expected "crm cluster join" help output
+ When Run "crm cluster remove -h" on "hanode1"
+ Then Output is the same with expected "crm cluster remove" help output
+ When Run "crm cluster geo_init -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-init" help output
+ When Run "crm cluster geo_join -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-join" help output
+ When Run "crm cluster geo_init_arbitrator -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-init-arbitrator" help output
+ When Try "crm cluster init -i eth1 -i eth1 -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Duplicated input for '-i/--interface' option
+ """
+ When Try "crm cluster init sbd -x -y" on "hanode1"
+ Then Expected "-x option or SKIP_CSYNC2_SYNC can't be used with any stage" in stderr
+ When Try "crm cluster init -i eth0 -i eth1 -i eth2 -y" on "hanode1"
+ Then Expected "Maximum number of interface is 2" in stderr
+ When Try "crm cluster init sbd -N hanode1 -N hanode2 -y" on "hanode1"
+ Then Expected "Can't use -N/--nodes option and stage(sbd) together" in stderr
+
+ @clean
+ Scenario: Init whole cluster service on node "hanode1" using "--node" option
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y --node "hanode1 hanode2 hanode3"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Bind specific network interface using "-i" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+
+ @clean
+  Scenario: Use multiple network interfaces with the "-M" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.default" is belong to "eth0"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -M -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+ And Corosync working on "unicast" mode
+
+ @clean
+  Scenario: Use multiple network interfaces with the "-i" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.default" is belong to "eth0"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -i eth0 -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+
+ @clean
+ Scenario: Setup cluster name and virtual IP using "-A" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Try "crm cluster init -A xxx -y"
+ Then Except "ERROR: cluster.init: 'xxx' does not appear to be an IPv4 or IPv6 address"
+ When Try "crm cluster init -A @hanode1.ip.0 -y"
+ Then Except "ERROR: cluster.init: Address already in use: @hanode1.ip.0"
+ When Run "crm cluster init -n hatest -A @vip.0 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster name is "hatest"
+ And Cluster virtual IP is "@vip.0"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Init cluster service with udpu using "-u" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -u -y -i eth0" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster is using udpu transport mode
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And Show corosync ring status
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with ipv6 using "-I" option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -I -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip6.default" is used by corosync on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And IP "@hanode2.ip6.default" is used by corosync on "hanode2"
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with ipv6 unicast using "-I" and "-u" option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -I -i eth1 -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip6.default" is used by corosync on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And IP "@hanode2.ip6.default" is used by corosync on "hanode2"
+ And Show cluster status on "hanode1"
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with multicast using "-U" option (bsc#1132375)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -U -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode1"
+ And Corosync working on "multicast" mode
+
+ @clean
+ Scenario: Init cluster with -N option (bsc#1175863)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -N hanode1 -N hanode2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Skip using csync2 by -x option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y -x" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "csync2.socket" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "csync2.socket" is "stopped" on "hanode2"
+ When Run "crm cluster init csync2 -y" on "hanode1"
+ Then Service "csync2.socket" is "started" on "hanode1"
+ And Service "csync2.socket" is "started" on "hanode2"
diff --git a/test/features/bootstrap_sbd_delay.feature b/test/features/bootstrap_sbd_delay.feature
new file mode 100644
index 0000000..8b636d1
--- /dev/null
+++ b/test/features/bootstrap_sbd_delay.feature
@@ -0,0 +1,286 @@
+@sbd
+Feature: configure sbd delay start correctly
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+
+ @clean
+ Scenario: disk-based SBD with small sbd_watchdog_timeout
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # calculated and set by sbd RA
+ And Cluster property "stonith-timeout" is "43"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
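+ # a worked example for the value below, assuming the crmsh defaults
+ # consensus = 1.2 * token and pcmk_delay_max = 30s for a two-node cluster:
+ # SBD_DELAY_START = 5 + 6 + 30 + 30 = 71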
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # value_from_sbd >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
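+ # a worked example, assuming constants.STONITH_TIMEOUT_DEFAULT is 60:
+ # value_from_sbd = 1.2 * (30 + 30) = 72; stonith-timeout = max(72, 60) + 5 + 6 = 83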
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode3"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
+ # runtime value is "41", we keep the larger one here
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # value_from_sbd >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+ # runtime value is "71", we keep ther larger one here
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And Service "sbd" is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: disk-less SBD with small sbd_watchdog_timeout
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "60"
+
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
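+ # a worked example, assuming the crmsh default consensus = 1.2 * token:
+ # SBD_DELAY_START = 5 + 6 + 2 * 15 = 41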
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ # stonith-timeout >= 1.2 * max(stonith_watchdog_timeout, 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
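+ # a worked example, assuming constants.STONITH_TIMEOUT_DEFAULT is 60:
+ # value_from_sbd = 1.2 * max(stonith_watchdog_timeout, 30) = 36 here,
+ # so stonith-timeout = max(36, 60) + 5 + 6 = 71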
+ And Cluster property "stonith-timeout" is "71"
+
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "71"
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "71"
+
+ @clean
+ Scenario: disk-based SBD with big sbd_watchdog_timeout
+ When Run "sed -i 's/watchdog_timeout: 15/watchdog_timeout: 60/' /etc/crm/profiles.yml" on "hanode1"
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "60"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ # calculated and set by sbd RA
+ And Cluster property "stonith-timeout" is "172"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
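+ # a worked example with watchdog_timeout 60 (msgwait = 120):
+ # SBD_DELAY_START = 5 + 6 + 30 + 120 = 161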
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ # stonith-timeout >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
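+ # a worked example: value_from_sbd = 1.2 * (30 + 120) = 180;
+ # stonith-timeout = max(180, 60) + 5 + 6 = 191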
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ # since the SBD_DELAY_START value (161s) > the default systemd startup value (1min 30s)
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ # 1.2*SBD_DELAY_START
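+ # here 1.2 * 161 = 193.2, which ends up as TimeoutSec=193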
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+
+ Given Has disk "/dev/sda1" on "hanode3"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And Service "sbd" is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ When Run "sed -i 's/watchdog_timeout: 60/watchdog_timeout: 15/g' /etc/crm/profiles.yml" on "hanode1"
+
+ @clean
+ Scenario: Add sbd via stage on a running cluster
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ Then Service "sbd" is "started" on "hanode2"
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: Add disk-based sbd with qdevice
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+
+ When Run "crm cluster init -s /dev/sda1 --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+
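+ # with qdevice, pcmk_delay_max is not configured, so (assuming consensus = 1.2 * token)
+ # SBD_DELAY_START = 5 + 6 + 30 = 41 and stonith-timeout = max(1.2 * 30, 60) + 5 + 6 = 71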
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "71"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ @clean
+ Scenario: Add disk-less sbd with qdevice
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+
+ When Run "crm cluster init -S --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+
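+ # the qdevice setup raises SBD_WATCHDOG_TIMEOUT to 35 (asserted below); assuming
+ # consensus = 1.2 * token: SBD_DELAY_START = 5 + 6 + 2 * 35 = 81 and
+ # stonith-timeout = max(1.2 * 2 * 35, 60) + 5 + 6 = 84 + 11 = 95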
+ And SBD option "SBD_DELAY_START" value is "81"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "35"
+ And Cluster property "stonith-timeout" is "95"
+ And Cluster property "stonith-watchdog-timeout" is "-1"
+
+ @clean
+ Scenario: Add and remove qdevice from cluster with sbd running
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: Test priority-fencing-delay and priority
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ And Cluster property "stonith-timeout" is "83"
+ And Cluster property "priority-fencing-delay" is "60"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ And Cluster property "priority-fencing-delay" is "0"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
diff --git a/test/features/bootstrap_sbd_normal.feature b/test/features/bootstrap_sbd_normal.feature
new file mode 100644
index 0000000..8c5d421
--- /dev/null
+++ b/test/features/bootstrap_sbd_normal.feature
@@ -0,0 +1,272 @@
+@sbd
+Feature: crmsh bootstrap sbd management
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+
+ @clean
+ Scenario: Verify sbd device
+ When Try "crm cluster init -s "/dev/sda1;/dev/sda2;/dev/sda3;/dev/sda4" -y"
+ Then Except "ERROR: cluster.init: Maximum number of SBD device is 3"
+ When Try "crm cluster init -s "/dev/sda1;/dev/sdaxxxx" -y"
+ Then Except "ERROR: cluster.init: /dev/sdaxxxx doesn't look like a block device"
+ When Try "crm cluster init -s "/dev/sda1;/dev/sda1" -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Duplicated input for '-s/--sbd-device' option
+ """
+
+ @clean
+ Scenario: Set up sbd with init and join process (bsc#1170999)
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+
+ @clean
+ Scenario: Re-setup cluster without sbd (bsc#1166967)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "stopped" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+ Scenario: Configure diskless sbd (bsc#1181907)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." not in stderr
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+ Scenario: Configure multi-disk sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda2" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Has disk "/dev/sda2" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -s /dev/sda2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+
+ @clean
+ Scenario: Configure sbd in several stages (bsc#1175057)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init ssh -y" on "hanode1"
+ And Run "crm cluster init csync2 -y" on "hanode1"
+ And Run "crm cluster init corosync -y" on "hanode1"
+ And Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster init cluster -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join ssh -y -c hanode1" on "hanode2"
+ And Run "crm cluster join csync2 -y -c hanode1" on "hanode2"
+ And Run "crm cluster join ssh_merge -y -c hanode1" on "hanode2"
+ And Run "crm cluster join cluster -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure diskless sbd in several stages (bsc#1175057)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init ssh -y" on "hanode1"
+ And Run "crm cluster init csync2 -y" on "hanode1"
+ And Run "crm cluster init corosync -y" on "hanode1"
+ And Run "crm cluster init sbd -S -y" on "hanode1"
+ And Run "crm cluster init cluster -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join ssh -y -c hanode1" on "hanode2"
+ And Run "crm cluster join csync2 -y -c hanode1" on "hanode2"
+ And Run "crm cluster join ssh_merge -y -c hanode1" on "hanode2"
+ And Run "crm cluster join cluster -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+ Scenario: Configure sbd on running cluster via stage (bsc#1181906)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure sbd on running cluster via stage with ra running (bsc#1181906)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Expected "WARNING: To start sbd.service, need to restart cluster service manually on each node" in stderr
+ Then Service "sbd" is "stopped" on "hanode1"
+ And Service "sbd" is "stopped" on "hanode2"
+ When Run "crm cluster restart" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster restart" on "hanode2"
+ Then Service "sbd" is "started" on "hanode2"
+ When Run "sleep 20" on "hanode1"
+ Then Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure sbd when no watchdog device (bsc#1154927, bsc#1178869)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "lsmod |grep softdog && rmmod softdog" on "hanode1"
+ And Try "lsmod |grep softdog && rmmod softdog" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -w softdog -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Try "lsmod |grep softdog"
+ Then Expected return code is "0"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Set up sbd and test fencing a node
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "stonith_admin -H hanode2 -c" on "hanode1"
+ When Run "crm -F node fence hanode2" on "hanode1"
+ Then Expected return code is "0"
+ Then Node "hanode2" is UNCLEAN
+ Then Wait "60" seconds for "hanode2" successfully fenced
+
+ @skip_non_root
+ @clean
+ Scenario: Set up sbd and test fencing a node, using hacluster to fence
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "stonith_admin -H hanode2 -c" on "hanode1"
+ When Run "su hacluster -c '/usr/sbin/crm -F node fence hanode2'" on "hanode1"
+ Then Expected return code is "0"
+ Then Node "hanode2" is UNCLEAN
+ Then Wait "60" seconds for "hanode2" successfully fenced
+
+ @clean
+ Scenario: Change existing disk-based sbd cluster to diskless sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ When Run "crm -F cluster init sbd -S -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+ When Try "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '"
+ Then Expected return code is "1"
+
+ @clean
+ Scenario: Change existing diskless sbd cluster to disk-based sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ When Run "crm -F cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ @clean
+ Scenario: Change sbd device
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda2" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Has disk "/dev/sda2" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ When Run "crm -F cluster init sbd -s /dev/sda2 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda2 '" OK
+ When Try "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '"
+ Then Expected return code is "1"
diff --git a/test/features/cluster_api.feature b/test/features/cluster_api.feature
new file mode 100644
index 0000000..b8676be
--- /dev/null
+++ b/test/features/cluster_api.feature
@@ -0,0 +1,143 @@
+@cluster_api
+Feature: Functional test to cover SAP clusterAPI
+
+ To avoid possible regressions on the crmsh side when adapting SAP applications
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2
+
+ Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm configure primitive d Dummy" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' > ~hacluster/.bashrc" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' > ~hacluster/.bashrc" on "hanode2"
+
+ @clean
+ Scenario: Start and stop resource by hacluster
+ When Run "su - hacluster -c 'crm resource stop d'" on "hanode1"
+ Then Expected return code is "0"
+ When Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Stopped"
+ And Show cluster status on "hanode1"
+ When Run "su - hacluster -c 'crm resource start d'" on "hanode1"
+ Then Expected return code is "0"
+ When Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Resource move by hacluster
+ Given Resource "d" is started on "hanode1"
+ # move <res> <node>
+ When Run "su - hacluster -c 'crm resource move d hanode2'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> force
+ When Run "su - hacluster -c 'crm resource move d hanode1'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> force
+ When Run "su - hacluster -c 'crm resource move d force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <lifetime> force
+ When Run "su - hacluster -c 'crm resource move d PT5M force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> <lifetime>
+ When Run "su - hacluster -c 'crm resource move d hanode2 PT5M'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> <lifetime> force
+ When Run "su - hacluster -c 'crm resource move d hanode1 PT5M force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ When Try "crm resource move d hanode2 PT5M force xxx"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d hanode2 PT5M forcd"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d xxxx PT5M force"
+ Then Except "ERROR: resource.move: Not our node: xxxx"
+ When Try "crm resource move d"
+ Then Except "ERROR: resource.move: No target node: Move requires either a target node or 'force'"
+
+ @clean
+ Scenario: Run "crm configure show" by hacluster
+ When Run "crm configure primitive d2 Dummy op monitor interval=10s timeout=20s on-fail=restart params fake=test meta resource-stickiness=5000" on "hanode1"
+ And Run "crm configure group g d2 meta resource-stickiness=3000" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d2" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+ When Run "su - hacluster -c 'crm configure show'" on "hanode1"
+ Then Expected return code is "0"
+ And Expected multiple lines in output
+ """
+ primitive d2 Dummy \
+ params fake=test \
+ meta resource-stickiness=5000 \
+ op monitor interval=10s timeout=20s on-fail=restart \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+ group g d2 \
+ meta resource-stickiness=3000
+ """
+
+ @clean
+ Scenario: pacemaker ACL-related operations by hacluster
+ When Run "su - hacluster -c 'crm configure primitive d2 Dummy'" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d2" type "Dummy" is "Started"
+ When Run "su - hacluster -c 'crm maintenance on'" on "hanode1"
+ When Run "crm_mon -1" on "hanode1"
+ Then Expected "Resource management is DISABLED" in stdout
+ When Run "su - hacluster -c 'crm maintenance off'" on "hanode1"
+ When Run "crm_mon -1" on "hanode1"
+ Then Expected "Resource management is DISABLED" not in stdout
+ When Run "su - hacluster -c 'crm node standby hanode2'" on "hanode1"
+ Then Node "hanode2" is standby
+ When Run "su - hacluster -c 'crm node online hanode2'" on "hanode1"
+ Then Node "hanode2" is online
+ When Run "su - hacluster -c 'crm ra providers Dummy'" on "hanode1"
+ Then Expected "heartbeat pacemaker" in stdout
+ When Run "su - hacluster -c 'crm status'" on "hanode1"
+ Then Expected "Online: [ hanode1 hanode2 ]" in stdout
+ When Run "su - hacluster -c '/usr/sbin/crm report /tmp/report'" on "hanode1"
+ Then No crmsh tracebacks
+ Then File "/tmp/report.tar.bz2" exists on "hanode1"
+ And Directory "hanode1" in "/tmp/report.tar.bz2"
+ And Directory "hanode2" in "/tmp/report.tar.bz2"
+ And File "pacemaker.log" in "/tmp/report.tar.bz2"
+ And File "corosync.conf" in "/tmp/report.tar.bz2"
diff --git a/test/features/configure_bugs.feature b/test/features/configure_bugs.feature
new file mode 100644
index 0000000..7b1222d
--- /dev/null
+++ b/test/features/configure_bugs.feature
@@ -0,0 +1,38 @@
+@configure
+Feature: Functional test for configure sub level
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2
+
+ @clean
+ Scenario: Replace sensitive data by default (bsc#1163581)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+
+ # mask password by default
+ When Run "crm node utilization hanode1 set password=qwertyui" on "hanode1"
+ When Try "crm configure show|grep password|grep qwertyui"
+ Then Expected return code is "1"
+ When Run "crm node utilization hanode2 set password testingpass" on "hanode1"
+ When Try "crm configure show|grep password|grep testingpass"
+ Then Expected return code is "1"
+ And Show crm configure
+
+ # mask password and ip address
+ When Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on "hanode1"
+ And Run "sed -i 's/; \[core\]/[core]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/; obscure_pattern = .*$/obscure_pattern = passw*|ip/g' /etc/crm/crm.conf" on "hanode1"
+ And Try "crm configure show|grep -E "@vip.0|qwertyui""
+ Then Expected return code is "1"
+ And Show crm configure
+
+ # mask password and ip address with another pattern
+ When Run "sed -i 's/obscure_pattern = .*$/obscure_pattern = passw* ip/g' /etc/crm/crm.conf" on "hanode1"
+ And Try "crm configure show|grep -E "@vip.0|qwertyui""
+ Then Expected return code is "1"
+ And Show crm configure
diff --git a/test/features/constraints_bugs.feature b/test/features/constraints_bugs.feature
new file mode 100644
index 0000000..c1174d5
--- /dev/null
+++ b/test/features/constraints_bugs.feature
@@ -0,0 +1,24 @@
+@constraints
+Feature: Verify constraints(order/colocation/location) bug
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2
+
+ Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Convert score to kind for rsc_order (bsc#1122391)
+ When Run "crm configure primitive d1 Dummy op monitor interval=10s" on "hanode1"
+ And Run "crm configure primitive d2 Dummy op monitor interval=10s" on "hanode1"
+ And Run "crm configure order o1 100: d1 d2" on "hanode1"
+ When Run "crm configure show" on "hanode1"
+ Then Expected "order o1 Mandatory: d1 d2" in stdout
diff --git a/test/features/coveragerc b/test/features/coveragerc
new file mode 100644
index 0000000..cb0403e
--- /dev/null
+++ b/test/features/coveragerc
@@ -0,0 +1,4 @@
+[run]
+data_file = /.coverage
+parallel = True
+source_pkgs = crmsh
diff --git a/test/features/crm_report_bugs.feature b/test/features/crm_report_bugs.feature
new file mode 100644
index 0000000..58d158b
--- /dev/null
+++ b/test/features/crm_report_bugs.feature
@@ -0,0 +1,164 @@
+@crm_report
+Feature: crm report functional test for verifying bugs
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Verify crm report handles files containing non-utf-8 characters (bsc#1130715)
+ When Run "echo 'abc#$%%^' | iconv -f UTF-8 -t UTF-16 > /opt/text_non_utf8" on "hanode1"
+ Then This file "/opt/text_non_utf8" will trigger UnicodeDecodeError exception
+ When Run "crm report -E /opt/text_non_utf8 report1" on "hanode1"
+ Then File "text_non_utf8" in "report1.tar.bz2"
+ When Run "rm -f report1.tar.bz2" on "hanode1"
+
+ @clean
+ Scenario: Compressed file ended before the end-of-stream marker was reached (bsc#1206606)
+ When Run "touch /var/log/pacemaker/pacemaker.log-20221220.xz" on "hanode1"
+ When Try "crm report report1" on "hanode1"
+ Then File "pacemaker.log" in "report1.tar.bz2"
+ And Expected "When reading file "/var/log/pacemaker/pacemaker.log-20221220.xz": Compressed file ended before the end-of-stream marker was reached" in stderr
+ When Run "rm -f report1.tar.bz2" on "hanode1"
+
+ @clean
+ Scenario: Include archived logs (bsc#1148873)
+ # For syslog
+ When Write multi lines to file "/var/log/log1" on "hanode1"
+ """
+ Sep 08 08:36:34 node1 log message line1
+ Sep 08 08:37:01 node1 log message line2
+ Sep 08 08:37:02 node1 log message line3
+ """
+ And Run "xz /var/log/log1" on "hanode1"
+ # bsc#1218491, unable to gather log files that are in the syslog format
+ And Run "touch -m -t 202201010000 /var/log/log1.xz" on "hanode1"
+ When Write multi lines to file "/var/log/log1" on "hanode1"
+ """
+ Sep 08 09:37:02 node1 log message line4
+ Sep 08 09:37:12 node1 log message line5
+ """
+ # bsc#1218491, unable to gather log files that are in the syslog format
+ And Run "touch -m -t 202201010001 /var/log/log1" on "hanode1"
+ And Run "crm report -f 20200901 -E /var/log/log1 report1" on "hanode1"
+ Then File "log1" in "report1.tar.bz2"
+ When Run "tar jxf report1.tar.bz2" on "hanode1"
+ And Run "cat report1/hanode1/log1" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ Sep 08 08:36:34 node1 log message line1
+ Sep 08 08:37:01 node1 log message line2
+ Sep 08 08:37:02 node1 log message line3
+ Sep 08 09:37:02 node1 log message line4
+ Sep 08 09:37:12 node1 log message line5
+ """
+ When Run "rm -rf report1.tar.gz report1" on "hanode1"
+
+ # For rfc5424
+ When Write multi lines to file "/var/log/log2" on "hanode1"
+ """
+ 2022-09-08T14:24:36.003Z mymachine.example.com myapp - ID47
+ 2022-09-08T14:25:15.003Z mymachine.example.com myapp - ID48
+ 2022-09-08T14:26:15.003Z mymachine.example.com myapp - ID49
+ """
+ And Run "xz /var/log/log2" on "hanode1"
+ When Write multi lines to file "/var/log/log2" on "hanode1"
+ """
+ 2022-09-08T14:27:15.003Z mymachine.example.com myapp - ID50
+ 2022-09-08T14:28:15.003Z mymachine.example.com myapp - ID51
+ """
+ And Run "crm report -f 20200901 -E /var/log/log2 report1" on "hanode1"
+ Then File "log2" in "report1.tar.bz2"
+ When Run "tar jxf report1.tar.bz2" on "hanode1"
+ And Run "cat report1/hanode1/log2" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ 2022-09-08T14:24:36.003Z mymachine.example.com myapp - ID47
+ 2022-09-08T14:25:15.003Z mymachine.example.com myapp - ID48
+ 2022-09-08T14:26:15.003Z mymachine.example.com myapp - ID49
+ 2022-09-08T14:27:15.003Z mymachine.example.com myapp - ID50
+ 2022-09-08T14:28:15.003Z mymachine.example.com myapp - ID51
+ """
+ When Run "rm -rf report1.tar.gz report1" on "hanode1"
+
+ @clean
+ Scenario: Collect corosync.log (bsc#1148874)
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1no/' /etc/corosync/corosync.conf" on "hanode1"
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1no/' /etc/corosync/corosync.conf" on "hanode2"
+ And Run "corosync-cfgtool -R" on "hanode1"
+ And Run "rm -f /var/log/cluster/corosync.log" on "hanode1"
+ And Run "rm -f /var/log/cluster/corosync.log" on "hanode2"
+ And Run "crm cluster stop --all" on "hanode1"
+ And Run "crm cluster start --all" on "hanode1"
+ And Run "sleep 15" on "hanode1"
+
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ Then File "corosync.log" not in "report.tar.bz2"
+ When Run "rm -rf report.tar.gz report" on "hanode1"
+
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1yes/' /etc/corosync/corosync.conf" on "hanode1"
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1yes/' /etc/corosync/corosync.conf" on "hanode2"
+ And Run "crm cluster stop --all" on "hanode1"
+ And Run "crm cluster start --all" on "hanode1"
+ And Run "sleep 15" on "hanode1"
+
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ Then File "corosync.log" in "report.tar.bz2"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ @clean
+ Scenario: Replace sensitive data (bsc#1163581)
+ # Set sensitive data TEL and password
+ When Run "crm node utilization hanode1 set TEL 13356789876" on "hanode1"
+ When Run "crm node utilization hanode1 set password qwertyui" on "hanode1"
+ When Run "crm report report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+ # crm report masks passw.* by default
+ # No password here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ # mask password and ip address by using crm.conf
+ When Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on "hanode1"
+ And Run "sed -i 's/; \[report\]/[report]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/; sanitize_rule = .*$/sanitize_rule = passw.*|ip.*:raw/g' /etc/crm/crm.conf" on "hanode1"
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R -E '@vip.0|qwertyui' report"
+ # No password here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ # Run the sanitizing job, including for TEL
+ When Run "crm report -s -p TEL report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+ # No password here
+ Then Expected return code is "1"
+ When Try "grep -R '13356789876' report"
+ # No TEL number here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ # disable sanitizing
+ When Run "sed -i 's/; \[report\]/[report]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/sanitize_rule = .*$/sanitize_rule = /g' /etc/crm/crm.conf" on "hanode1"
+ When Run "crm report report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+ # found password
+ Then Expected return code is "0"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
diff --git a/test/features/crm_report_normal.feature b/test/features/crm_report_normal.feature
new file mode 100644
index 0000000..00a1f2b
--- /dev/null
+++ b/test/features/crm_report_normal.feature
@@ -0,0 +1,109 @@
+@crm_report
+Feature: crm report functional test for common cases
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: crm report collects trace ra logs
+ When Run "crm configure primitive d Dummy" on "hanode1"
+ And Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ And Resource "d2" is started on "hanode2"
+ When Run "crm resource trace d monitor" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /var/lib/heartbeat/trace_ra/Dummy" in stdout
+ When Wait "10" seconds
+ And Run "crm resource untrace d" on "hanode1"
+ And Run "crm resource trace d2 monitor /trace_d" on "hanode1"
+ Then Expected "Trace for d2:monitor is written to /trace_d/Dummy" in stdout
+ When Wait "10" seconds
+ And Run "crm resource untrace d2" on "hanode1"
+ And Run "crm report report" on "hanode1"
+ Then No crmsh tracebacks
+ Then Directory "trace_ra" in "report.tar.bz2"
+ And Directory "trace_d" in "report.tar.bz2"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ @clean
+ Scenario: Run history and script
+ When Run "crm history info" on "hanode1"
+ When Run "crm history refresh" on "hanode1"
+ When Try "crm history peinputs|grep "pengine/pe-input-0""
+ Then Expected return code is "0"
+ When Try "crm history info|grep "Nodes: hanode1 hanode2""
+ Then Expected return code is "0"
+ When Run "crm configure primitive d100 Dummy" on "hanode1"
+ When Run "crm history refresh force" on "hanode1"
+ When Try "crm history info|grep "Resources: d100""
+ Then Expected return code is "0"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Run "crm history refresh force" on "hanode1"
+ When Try "crm history info|grep "Nodes: hanode1 hanode2 hanode3""
+ Then Expected return code is "0"
+ When Run "crm script run health" on "hanode1"
+ When Run "crm script run virtual-ip id=vip_x ip=@vip.0" on "hanode1"
+ Then Resource "vip_x" type "IPaddr2" is "Started"
+
+ @clean
+ Scenario: Common tests
+ When Run "crm report -h" on "hanode1"
+
+ When Try "crm report "*s"" on "hanode1"
+ Then Expected "*s is invalid file name" in stderr
+
+ When Try "crm report /fsf/report" on "hanode1"
+ Then Expected "Directory /fsf does not exist" in stderr
+
+ When Try "crm report -n fs" on "hanode1"
+ Then Expected "host "fs" is unreachable:" in stderr
+
+ When Try "crm report -f xxxx" on "hanode1"
+ Then Expected "Invalid time string 'xxxx'" in stderr
+
+ When Try "crm report -f 1d -t 2d" on "hanode1"
+ Then Expected "The start time must be before the finish time" in stderr
+
+ When Run "crm -d report -S -d /tmp/report" on "hanode1"
+ Then Directory "/tmp/report/hanode1" created
+ Then Directory "/tmp/report/hanode2" not created
+ When Run "rm -rf /tmp/report" on "hanode1"
+
+ When Run "crm report -vv" on "hanode1"
+ Then Default crm_report tar file created
+ When Remove default crm_report tar file
+
+ When Run "crm report -d /tmp/report" on "hanode1"
+ Then Directory "/tmp/report" created
+ When Try "crm report -d /tmp/report" on "hanode1"
+ Then Expected "Destination directory /tmp/report exists, please cleanup or use -Z option" in stderr
+ When Run "crm report -d -Z /tmp/report" on "hanode1"
+ Then Directory "/tmp/report" created
+
+ When Run "mv /etc/corosync/corosync.conf /etc/corosync/corosync.bak" on "hanode1"
+ When Try "crm report" on "hanode1"
+ Then Expected "File /etc/corosync/corosync.conf does not exist" in stderr
+ When Run "mv /etc/corosync/corosync.bak /etc/corosync/corosync.conf" on "hanode1"
+
+ When Run "mv /var/lib/pacemaker/pengine /var/lib/pacemaker/pengine_bak" on "hanode1"
+ When Try "crm report" on "hanode1"
+ Then Expected "Cannot find PE directory" in stderr
+ When Run "mv /var/lib/pacemaker/pengine_bak /var/lib/pacemaker/pengine" on "hanode1"
+
+ When Run "crm cluster stop --all" on "hanode1"
+ When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode1"
+ When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode2"
+ When Try "crm report" on "hanode1"
+ Then Expected "Could not figure out a list of nodes; is this a cluster node" in stderr
diff --git a/test/features/environment.py b/test/features/environment.py
new file mode 100644
index 0000000..61d2ac2
--- /dev/null
+++ b/test/features/environment.py
@@ -0,0 +1,53 @@
+import logging
+import re
+import subprocess
+import time
+
+import crmsh.userdir
+import crmsh.utils
+from crmsh.sh import ShellUtils
+
+
+def get_online_nodes():
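+    # 'crm_node -l' prints one line per node in the form "<id> <name> <state>";
+    # collect the names of the nodes that are currently in "member" state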
+ _, out, _ = ShellUtils().get_stdout_stderr('sudo crm_node -l')
+ if out:
+ return re.findall(r'[0-9]+ (.*) member', out)
+ else:
+ return None
+
+
+def resource_cleanup():
+ subprocess.run(
+ ['sudo', 'crm', 'resource', 'cleanup'],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+
+
+def before_step(context, step):
+ context.logger = logging.getLogger("Step:{}".format(step.name))
+
+
+def before_tag(context, tag):
+    # tag @clean means the cluster service needs to be stopped
+ if tag == "clean":
+ time.sleep(3)
+ online_nodes = get_online_nodes()
+ if online_nodes:
+ resource_cleanup()
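+            # wait until a DC (Designated Controller) is elected, i.e. the cluster
+            # has settled, before stopping the cluster services on all nodes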
+ while True:
+ time.sleep(1)
+ rc, stdout, _ = ShellUtils().get_stdout_stderr('sudo crmadmin -D -t 1')
+ if rc == 0 and stdout.startswith('Designated'):
+ break
+ subprocess.call(
+ ['sudo', 'crm', 'cluster', 'stop', '--all'],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ if tag == "skip_non_root":
+ sudoer = crmsh.userdir.get_sudoer()
+ if sudoer or crmsh.userdir.getuser() != 'root':
+ context.scenario.skip()
diff --git a/test/features/geo_setup.feature b/test/features/geo_setup.feature
new file mode 100644
index 0000000..b26b04e
--- /dev/null
+++ b/test/features/geo_setup.feature
@@ -0,0 +1,29 @@
+@geo
+Feature: geo cluster
+
+ Test geo cluster setup using bootstrap
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: GEO cluster setup
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y -n cluster1" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on "hanode1"
+
+ When Run "crm cluster init -y -n cluster2" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on "hanode2"
+
+ When Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+ When Run "crm cluster geo_join -y --cluster-node hanode1 --clusters "cluster1=@vip.0 cluster2=@vip.1"" on "hanode2"
+
+ Given Service "booth@booth" is "stopped" on "hanode3"
+ When Run "crm cluster geo_init_arbitrator -y --cluster-node hanode1" on "hanode3"
+ Then Service "booth@booth" is "started" on "hanode3"
+ When Run "crm resource start g-booth" on "hanode1"
+ Then Show cluster status on "hanode1"
+ When Run "crm resource start g-booth" on "hanode2"
+ Then Show cluster status on "hanode2"
diff --git a/test/features/healthcheck.feature b/test/features/healthcheck.feature
new file mode 100644
index 0000000..da7f78a
--- /dev/null
+++ b/test/features/healthcheck.feature
@@ -0,0 +1,37 @@
+@healthcheck
+Feature: healthcheck detects and fixes problems in a crmsh deployment
+
+ Tag @clean means the cluster service needs to be stopped if it is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: A new node joins when directory ~hacluster/.ssh is removed from the cluster
+ When Run "rm -rf ~hacluster/.ssh" on "hanode1"
+ And Run "rm -rf ~hacluster/.ssh" on "hanode2"
+ And Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ # FIXME: the new join implementation no longer triggers an exception, so the auto fix is not applied
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode1"
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode2"
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode3"
+
+ # skip non-root, as behave_agent cannot run commands interactively with a non-root sudoer
+ @skip_non_root
+ @clean
+ Scenario: An upgrade_seq file in ~hacluster/crmsh/ will be migrated to /var/lib/crmsh (bsc#1213050)
+ When Run "mv /var/lib/crmsh ~hacluster/" on "hanode1"
+ Then File "~hacluster/crmsh/upgrade_seq" exists on "hanode1"
+ When Run "crm cluster status" on "hanode1"
+ Then File "/var/lib/crmsh/upgrade_seq" exists on "hanode1"
diff --git a/test/features/ocfs2.feature b/test/features/ocfs2.feature
new file mode 100644
index 0000000..29b4b1a
--- /dev/null
+++ b/test/features/ocfs2.feature
@@ -0,0 +1,61 @@
+@ocfs2
+Feature: OCFS2 configuration/verify using bootstrap
+
+@clean
+Scenario: Configure ocfs2 along with init process
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Configure cluster lvm2 + ocfs2 with init process
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda3" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -o /dev/sda3 -C -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
+ And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Add ocfs2 alone on a running cluster
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda1" on "hanode2"
+ And Has disk "/dev/sda2" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Online nodes are "hanode1 hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster init ocfs2 -o /dev/sda2 -y" on "hanode1"
+ Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Add cluster lvm2 + ocfs2 on a running cluster
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda1" on "hanode2"
+ And Has disk "/dev/sda2" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Online nodes are "hanode1 hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster init ocfs2 -o /dev/sda2 -C -y" on "hanode1"
+ Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
+ And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
diff --git a/test/features/qdevice_options.feature b/test/features/qdevice_options.feature
new file mode 100644
index 0000000..e0277a7
--- /dev/null
+++ b/test/features/qdevice_options.feature
@@ -0,0 +1,50 @@
+@qdevice
+Feature: corosync qdevice/qnetd options
+
+ Test corosync qdevice/qnetd options:
+ "--qdevice-algo": QNetd decision ALGORITHM(ffsplit/lms, default:ffsplit)
+ "--qdevice-ti-breaker": QNetd TIE_BREAKER(lowest/highest/valid_node_id, default:lowest)
+ "--qdevice-tls": Whether using TLS on QDevice/QNetd(on/off/required, default:on)
+ "--qdevice-heuristics": COMMAND to run with absolute path. For multiple commands, use ";" to separate
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 qnetd-node
+
+ @clean
+ Scenario: Use "--qdevice-algo" to change qnetd decision algorithm to "lms"
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-algo=lms -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-tie-breaker" to change qnetd tie_breaker to "highest"
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tie-breaker=highest -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-tls" to turn off TLS certification
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tls=off -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-heuristics" to configure heuristics
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='/usr/bin/test -f /tmp/file_exists;/usr/bin/which pacemaker' -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
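
For reference, the options exercised above all land in the quorum.device
section of corosync.conf. A minimal sketch of that mapping, assuming the key
names documented in corosync-qdevice(8) and the single vote contributed by the
default ffsplit algorithm (the helper below is illustrative, not crmsh code):

    def render_qdevice_conf(host, port=5403, algo="ffsplit",
                            tie_breaker="lowest", tls="on", heuristics=None):
        """Render the corosync.conf quorum.device block the options map to."""
        lines = [
            "quorum {",
            "    provider: corosync_votequorum",
            "    device {",
            "        votes: 1",
            "        model: net",
            "        net {",
            f"            host: {host}",
            f"            port: {port}",                # --qdevice-port
            f"            algorithm: {algo}",           # --qdevice-algo
            f"            tie_breaker: {tie_breaker}",  # --qdevice-tie-breaker
            f"            tls: {tls}",                  # --qdevice-tls
            "        }",
        ]
        if heuristics:  # --qdevice-heuristics, ";"-separated absolute paths
            lines.append("        heuristics {")
            lines.append("            mode: sync")      # --qdevice-heuristics-mode
            for i, cmd in enumerate(heuristics.split(";")):
                lines.append(f"            exec_check{i}: {cmd}")
            lines.append("        }")
        lines += ["    }", "}"]
        return "\n".join(lines)

    print(render_qdevice_conf("qnetd-node", algo="lms"))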
diff --git a/test/features/qdevice_setup_remove.feature b/test/features/qdevice_setup_remove.feature
new file mode 100644
index 0000000..df7af3d
--- /dev/null
+++ b/test/features/qdevice_setup_remove.feature
@@ -0,0 +1,173 @@
+@qdevice
+Feature: corosync qdevice/qnetd setup/remove process
+
+ Test corosync qdevice/qnetd setup/remove process
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3 hanode4 qnetd-node
+
+ Background: Cluster and qdevice services are stopped
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+
+ @clean
+ Scenario: Setup qdevice/qnetd during init/join process
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ # for bsc#1181415
+ Then Expected "Restarting cluster service" in stdout
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show status from qnetd
+ And Show corosync qdevice configuration
+ And Show qdevice status
+
+ @clean
+ Scenario: Setup qdevice/qnetd on a running cluster
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ When Write multi lines to file "/etc/corosync/corosync.conf" on "hanode1"
+ """
+ # This is a test for bsc#1166684
+
+ """
+ When Write multi lines to file "/etc/corosync/corosync.conf" on "hanode2"
+ """
+ # This is a test for bsc#1166684
+
+ """
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ # for bsc#1181415
+ Then Expected "Starting corosync-qdevice.service in cluster" in stdout
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show status from qnetd
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Remove qdevice from a two-node cluster
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Show corosync qdevice configuration
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Setup qdevice on multiple nodes
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Expected votes will be "3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Expected votes will be "4"
+ When Run "crm cluster join -c hanode1 -y" on "hanode4"
+ Then Cluster service is "started" on "hanode4"
+ And Online nodes are "hanode1 hanode2 hanode3 hanode4"
+ And Service "corosync-qdevice" is "started" on "hanode4"
+ And Expected votes will be "5"
+ And Show corosync qdevice configuration
+ And Show status from qnetd
+
+ @clean
+ Scenario: Setup qdevice on an existing multi-node cluster
+ When Run "crm cluster init -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode4"
+ Then Cluster service is "started" on "hanode4"
+ And Online nodes are "hanode1 hanode2 hanode3 hanode4"
+ And Expected votes will be "4"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Show corosync qdevice configuration
+ And Expected votes will be "5"
+ And Service "corosync-qdevice" is "started" on "hanode4"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show status from qnetd
+
+ @clean
+ Scenario: Setup qdevice using IPv6
+ When Run "crm cluster init -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster init qdevice --qnetd-hostname @qnetd-node.ip6.0 -y" on "hanode1"
+ Then Show corosync qdevice configuration
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show status from qnetd
+
+ @skip_non_root
+ @clean
+ Scenario: Passwordless for root, not for sudoer (bsc#1209193)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "useradd -m -s /bin/bash xin" on "hanode1"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin" on "hanode2"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "su xin -c "sudo crm cluster init qdevice --qnetd-hostname=qnetd-node -y"" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Missing crm/crm.conf (bsc#1209193)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+
+ @clean
+ Scenario: One qnetd for multiple clusters, added in parallel
+ When Run "crm cluster init -n cluster1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster init -n cluster2 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Run "crm cluster init qdevice --qnetd-hostname qnetd-node -y" on "hanode2,hanode3"
+ Then Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
diff --git a/test/features/qdevice_usercase.feature b/test/features/qdevice_usercase.feature
new file mode 100644
index 0000000..c35d2cb
--- /dev/null
+++ b/test/features/qdevice_usercase.feature
@@ -0,0 +1,87 @@
+@qdevice
+Feature: Verify that the master side survives a split-brain
+
+ Steps to set up a two-node cluster with a heuristics qdevice,
+ start a promotable clone resource, and make sure the master side always keeps quorum:
+ 1. Set up a two-node cluster
+ 2. Generate a script to check whether this node is the master
+ 3. Add a promotable clone resource
+ 4. Set up qdevice with heuristics
+ 5. Use the iptables command to simulate a split-brain
+ 6. Check that hanode1 has quorum, while hanode2 doesn't
+
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 qnetd-node
+
+ Background: Cluster and qdevice services are stopped
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+
+ @clean
+ Scenario: Setup qdevice with heuristics
+ When Run "crm cluster init -y --qnetd-hostname=qnetd-node --qdevice-heuristics="/usr/bin/test -f /tmp/heuristics.txt" --qdevice-heuristics-mode="on"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show corosync qdevice configuration
+ When Run "crm corosync status qnetd" on "hanode1"
+ Then Expected regrex "Heuristics:\s+Fail" in stdout
+ When Run "touch /tmp/heuristics.txt" on "hanode1"
+ When Run "sleep 30" on "hanode1"
+ When Run "crm corosync status qnetd" on "hanode1"
+ Then Expected regrex "Heuristics:\s+Pass" in stdout
+
+ @clean
+ Scenario: Master survives a split-brain
+ # Setup a two-nodes cluster
+ When Run "crm cluster init -y -i eth0" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y -i eth0" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ # Generate script to check whether this node is master
+ When Write multi lines to file "/etc/corosync/qdevice/check_master.sh" on "hanode1"
+ """
+ #!/usr/bin/sh
+ crm_resource --locate -r promotable-1 2>&1 | grep -E "Master|Promoted" | grep `crm_node -n` >/dev/null 2>&1
+ """
+ And Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode1"
+ When Write multi lines to file "/etc/corosync/qdevice/check_master.sh" on "hanode2"
+ """
+ #!/usr/bin/sh
+ crm_resource --locate -r promotable-1 2>&1 | grep -E "Master|Promoted" | grep `crm_node -n` >/dev/null 2>&1
+ """
+ And Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode2"
+ # Add a promotable clone resource and make sure hanode1 is master
+ And Run "crm configure primitive stateful-1 ocf:pacemaker:Stateful op monitor role=Promoted interval=10s op monitor role=Unpromoted interval=5s" on "hanode1"
+ And Run "crm configure clone promotable-1 stateful-1 meta promotable=true" on "hanode1"
+ And Run "sleep 5" on "hanode1"
+ Then Show cluster status on "hanode1"
+
+ # Setup qdevice with heuristics
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node --qdevice-heuristics=/etc/corosync/qdevice/check_master.sh -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ When Run "sleep 5" on "hanode1"
+ Then Show status from qnetd
+ When Run "corosync-quorumtool -s" on "hanode1"
+ Then Expected "Quorate: Yes" in stdout
+ # Use iptables command to simulate split-brain
+ When Run "iptables -I INPUT -s @hanode2.ip.default -j DROP; sudo iptables -I OUTPUT -d @hanode2.ip.default -j DROP" on "hanode1"
+ And Run "iptables -I INPUT -s @hanode1.ip.default -j DROP; sudo iptables -I OUTPUT -d @hanode1.ip.default -j DROP" on "hanode2"
+ # Check whether hanode1 has quorum, while hanode2 doesn't
+ And Run "sleep 20" on "hanode1"
+ When Run "crm corosync status quorum" on "hanode1"
+ Then Expected "Quorate: Yes" in stdout
+ When Run "crm corosync status quorum" on "hanode2"
+ Then Expected "Quorate: No" in stdout
+ And Show cluster status on "hanode1"
+ And Show cluster status on "hanode2"
+ When Try "crm corosync status fs" on "hanode1"
+ Then Expected "Wrong type "fs" to query status" in stderr
diff --git a/test/features/qdevice_validate.feature b/test/features/qdevice_validate.feature
new file mode 100644
index 0000000..5403a52
--- /dev/null
+++ b/test/features/qdevice_validate.feature
@@ -0,0 +1,161 @@
+@qdevice
+Feature: corosync qdevice/qnetd options validation
+
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3 qnetd-node node-without-ssh
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use the same node
+ When Try "crm cluster init --qnetd-hostname=hanode1"
+ Then Except "ERROR: cluster.init: host for qnetd must be a remote one"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use hanode1's IP
+ When Try "crm cluster init --qnetd-hostname=@hanode1.ip.0"
+ Then Except "ERROR: cluster.init: host for qnetd must be a remote one"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use unknown hostname
+ When Try "crm cluster init --qnetd-hostname=error-node"
+ Then Except "ERROR: cluster.init: host "error-node" is unreachable"
+
+ @clean
+ Scenario: The ssh service on the qnetd node is not available
+ When Run "systemctl stop sshd.service" on "node-without-ssh"
+ When Try "crm cluster init --qnetd-hostname=node-without-ssh"
+ Then Except "ERROR: cluster.init: ssh service on "node-without-ssh" not available"
+
+ @clean
+ Scenario: Option "--qdevice-port" set wrong port
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-port=1"
+ Then Except "ERROR: cluster.init: invalid qdevice port range(1024 - 65535)"
+
+ @clean
+ Scenario: Option "--qdevice-tie-breaker" set wrong value
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tie-breaker=wrongtiebreaker"
+ Then Except "ERROR: cluster.init: invalid qdevice tie_breaker(lowest/highest/valid_node_id)"
+
+ @clean
+ Scenario: Option "--qdevice-heuristics" set wrong value
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='ls /opt'"
+ Then Except "ERROR: cluster.init: commands for heuristics should be absolute path"
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='/bin/not_exist_cmd /opt'"
+ Then Except "ERROR: cluster.init: command /bin/not_exist_cmd not exist"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" is required by other qdevice options
+ When Try "crm cluster init --qdevice-port=1234"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qnetd-hostname is required if want to configure qdevice
+ """
+
+ @clean
+ Scenario: Option --qdevice-heuristics is required if want to configure heuristics mode
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics-mode="on""
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qdevice-heuristics is required if want to configure heuristics mode
+ """
+
+ @clean
+ Scenario: Node for qnetd does not have corosync-qnetd installed
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "crm cluster init --qnetd-hostname=hanode2 -y"
+ Then Except multiple lines
+ """
+ ERROR: cluster.init: Package "corosync-qnetd" not installed on hanode2!
+ Cluster service already successfully started on this node except qdevice service.
+ If you still want to use qdevice, install "corosync-qnetd" on hanode2.
+ Then run command "crm cluster init" with "qdevice" stage, like:
+ crm cluster init qdevice qdevice_related_options
+ That command will setup qdevice separately.
+ """
+ And Cluster service is "started" on "hanode1"
+
+ @clean
+ Scenario: Raise error when adding qdevice stage with the same cluster name
+ Given Cluster service is "stopped" on "hanode2"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -n cluster1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster init -n cluster1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Try "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode2,hanode3"
+ Then Except "ERROR: cluster.init: Duplicated cluster name "cluster1"!"
+ When Run "crm cluster stop" on "hanode2"
+ When Run "crm cluster stop" on "hanode3"
+
+ @clean
+ Scenario: Raise error when the same cluster name already exists on qnetd
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y" on "hanode2"
+ When Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y"
+ Then Except multiple lines
+ """
+ ERROR: cluster.init: This cluster's name "cluster1" already exists on qnetd server!
+ Cluster service already successfully started on this node except qdevice service.
+ If you still want to use qdevice, consider to use the different cluster-name property.
+ Then run command "crm cluster init" with "qdevice" stage, like:
+ crm cluster init qdevice qdevice_related_options
+ That command will setup qdevice separately.
+ """
+ And Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Run qdevice stage on inactive cluster node
+ Given Cluster service is "stopped" on "hanode1"
+ When Try "crm cluster init qdevice --qnetd-hostname=qnetd-node"
+ Then Except "ERROR: cluster.init: Cluster is inactive - can't run qdevice stage"
+
+ @clean
+ Scenario: Run qdevice stage without the "--qnetd-hostname" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Try "crm cluster init qdevice -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qnetd-hostname is required if want to configure qdevice
+ """
+
+ @clean
+ Scenario: Setup qdevice on a single node cluster with RA running (bsc#1181415)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Expected "WARNING: To use qdevice service, need to restart cluster service manually on each node" in stderr
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster restart" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+
+ @clean
+ Scenario: Remove qdevice from a single node cluster (bsc#1181415)
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Expected "Restarting cluster service" in stdout
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+
+ @clean
+ Scenario: Remove qdevice from a single node cluster which has RA running (bsc#1181415)
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Expected "WARNING: To remove qdevice service, need to restart cluster service manually on each node" in stderr
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster restart" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
diff --git a/test/features/resource_failcount.feature b/test/features/resource_failcount.feature
new file mode 100644
index 0000000..69f402a
--- /dev/null
+++ b/test/features/resource_failcount.feature
@@ -0,0 +1,61 @@
+@resource
+Feature: Use "crm resource failcount" to manage failcounts
+
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1
+
+ Background: Setup one node cluster and configure a Dummy resource
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ Then Resource "d" type "Dummy" is "Started"
+
+ @clean
+ Scenario: Validation of invalid parameters
+ When Try "crm resource failcount d showss hanode1"
+ Then Except "ERROR: resource.failcount: showss is not valid command(should be one of ['set', 'delete', 'show'])"
+ When Try "crm resource failcount d set hanode11 0"
+ Then Except "ERROR: resource.failcount: Node hanode11 not in this cluster"
+
+ @clean
+ Scenario: Set the failcount to 0
+ When Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "1"
+ When Run "crm resource failcount d set hanode1 0" on "hanode1"
+ Then Resource "d" failcount on "hanode1" is "0"
+
+ @clean
+ Scenario: Set multiple failcounts to 0
+ When Run "sed -i -e '/rm \${OCF_RESKEY_state}/a\' -e "else\nreturn \$OCF_ERR_GENERIC" /usr/lib/ocf/resource.d/heartbeat/Dummy" on "hanode1"
+ And Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "INFINITY"
+ """
+ now there are two failcount entries: one for monitor, another for stop
+ """
+ When Run "crm resource failcount d set hanode1 0" on "hanode1"
+ """
+ set all failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "0"
+ When Run "crm resource cleanup" on "hanode1"
+ And Wait "5" seconds
+ And Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "INFINITY"
+ """
+ now there are two failcount entries: one for monitor, another for stop
+ """
+ When Run "crm resource failcount d set hanode1 0 stop" on "hanode1"
+ """
+ set stop failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "1"
+ When Run "crm resource failcount d set hanode1 0 monitor" on "hanode1"
+ """
+ set monitor failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "0"
+
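
Pacemaker tracks these failcounts as per-operation transient node attributes,
which is why a single resource can accumulate separate monitor and stop
entries as noted above. A hedged sketch of the naming, assuming the Pacemaker
2.x convention fail-count-<resource>#<operation>_<interval-in-ms>:

    def failcount_attr(resource: str, op: str, interval_s: int) -> str:
        # Assumed Pacemaker 2.x attribute name: fail-count-<rsc>#<op>_<ms>
        return f"fail-count-{resource}#{op}_{interval_s * 1000}"

    # The 3s monitor configured in the Background would be tracked as:
    print(failcount_attr("d", "monitor", 3))  # fail-count-d#monitor_3000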
diff --git a/test/features/resource_set.feature b/test/features/resource_set.feature
new file mode 100644
index 0000000..a6726d7
--- /dev/null
+++ b/test/features/resource_set.feature
@@ -0,0 +1,154 @@
+@resource
+Feature: Use "crm configure set" to update attributes and operations
+
+ Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2
+
+ Background: Setup cluster and configure some resources
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ Then Resource "d" type "Dummy" is "Started"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0 op monitor interval=3s" on "hanode1"
+ Then Resource "vip" type "IPaddr2" is "Started"
+ And Cluster virtual IP is "@vip.0"
+ When Run "crm configure primitive s ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
+ Then Resource "s" type "Stateful" is "Started"
+
+ @clean
+ Scenario: Validation of invalid parameters
+ When Try "crm configure set path"
+ Then Except "ERROR: configure.set: Expected (path value), takes exactly 2 arguments (1 given)"
+ When Try "crm configure set xxxx value"
+ Then Except "ERROR: configure.set: Invalid path: "xxxx"; Valid path: "id.[op_type.][interval.]name""
+ When Try "crm configure set xxxx.name value"
+ Then Except "ERROR: configure.set: Object xxxx not found"
+ When Try "crm configure set d.name value"
+ Then Except "ERROR: configure.set: Attribute not found: d.name"
+ When Try "crm configure set d.monitor.100.timeout 10"
+ Then Except "ERROR: configure.set: Operation "monitor" interval "100" not found for resource d"
+ When Try "crm configure set s.monitor.interval 20"
+ Then Except "ERROR: configure.set: Should specify interval of monitor"
+
+ @clean
+ Scenario: Using configure.set to update resource parameters and operation values
+ When Run "crm configure set vip.ip @vip.0" on "hanode1"
+ Then Cluster virtual IP is "@vip.0"
+ When Run "crm configure set d.monitor.on-fail ignore" on "hanode1"
+ And Run "crm configure show d" on "hanode1"
+ Then Expected "on-fail=ignore" in stdout
+ When Run "crm configure set s.monitor.5s.interval 20s" on "hanode1"
+ And Run "crm configure show s" on "hanode1"
+ Then Expected "interval=20s" in stdout
+ When Run "crm configure set op-options.timeout 101" on "hanode1"
+ And Run "crm configure show op-options" on "hanode1"
+ Then Expected "timeout=101" in stdout
+
+ @clean
+ Scenario: Parse node and lifetime correctly (bsc#1192618)
+ Given Resource "d" is started on "hanode1"
+ # move <res> <node>
+ When Run "crm resource move d hanode2" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> force
+ When Run "crm resource move d hanode1" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> force
+ When Run "crm resource move d force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <lifetime> force
+ When Run "crm resource move d PT5M force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> <lifetime>
+ When Run "crm resource move d hanode2 PT5M" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> <lifetime> force
+ When Run "crm resource move d hanode1 PT5M force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ When Try "crm resource move d hanode2 PT5M force xxx"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d hanode2 PT5M forcd"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d xxxx PT5M force"
+ Then Except "ERROR: resource.move: Not our node: xxxx"
+ When Try "crm resource move d"
+ Then Except "ERROR: resource.move: No target node: Move requires either a target node or 'force'"
+
+ @clean
+ Scenario: Promote and demote a promotable clone resource (bsc#1194125)
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
+ And Run "crm configure clone p2 s2 meta promotable=true" on "hanode1"
+ And Run "crm resource demote p2" on "hanode1"
+ Then Run "sleep 2;! crm_resource --locate -r p2|grep -E 'Master|Promoted'" OK
+ When Run "crm resource promote p2" on "hanode2"
+ Then Run "sleep 2;crm_resource --locate -r p2|grep -E 'Master|Promoted'" OK
+
+ @clean
+ Scenario: Operation warnings
+ When Run "crm configure primitive id=d2 Dummy op start interval=5s" on "hanode1"
+ Then Expected "WARNING: d2: Specified interval for start is 5s, it must be 0" in stderr
+ When Run "crm configure primitive id=d3 Dummy op monitor interval=0" on "hanode1"
+ Then Expected "WARNING: d3: interval in monitor should be larger than 0, advised is 10s" in stderr
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=3s" on "hanode1"
+ Then Expected "WARNING: s2: interval in monitor must be unique, advised is 11s" in stderr
+ When Run "crm configure primitive id=d4 Dummy op start timeout=10s" on "hanode1"
+ Then Expected "WARNING: d4: specified timeout 10s for start is smaller than the advised 20s" in stderr
+
+ @clean
+ Scenario: Trace RA with a specific directory
+ When Run "crm resource trace d monitor" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /var/lib/heartbeat/trace_ra/Dummy" in stdout
+ When Wait "10" seconds
+ Then Run "bash -c 'ls /var/lib/heartbeat/trace_ra/Dummy/d.monitor.*'" OK
+ When Run "crm resource untrace d" on "hanode1"
+ Then Expected "Stop tracing d" in stdout
+ When Run "crm resource trace d monitor /trace_log_d" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /trace_log_d/Dummy" in stdout
+ When Wait "10" seconds
+ Then Run "bash -c 'ls /trace_log_d/Dummy/d.monitor.*'" OK
+ When Run "crm resource untrace d" on "hanode1"
+ Then Expected "Stop tracing d" in stdout
+
+ @clean
+ Scenario: Add promotable=true and interleave=true automatically (bsc#1205522)
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful" on "hanode1"
+ And Run "crm configure clone p2 s2" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p2 s2'|grep 'promotable=true interleave=true'" OK
+ When Run "crm configure primitive s3 ocf:pacemaker:Stateful" on "hanode1"
+ And Run "crm configure clone p3 s3 meta promotable=false" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p3 s3'|grep 'promotable=false interleave=true'" OK
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ And Run "crm configure clone p4 d2" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p4 d2'|grep 'interleave=true'" OK
+
+ @clean
+ Scenario: Run rsctest
+ When Run "crm resource stop d vip" on "hanode1"
+ When Run "crm configure rsctest d vip" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ INFO: Probing resources
+ INFO: Testing on hanode1: d vip
+ INFO: Testing on hanode2: d vip
+ """
diff --git a/test/features/ssh_agent.feature b/test/features/ssh_agent.feature
new file mode 100644
index 0000000..5c632dd
--- /dev/null
+++ b/test/features/ssh_agent.feature
@@ -0,0 +1,86 @@
+# vim: sw=2 sts=2
+Feature: ssh-agent support
+
+ Test ssh-agent support for crmsh
+ Need nodes: hanode1 hanode2 hanode3 qnetd-node
+
+ Scenario: Errors are reported when ssh-agent is not available
+ When Try "crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "Environment variable SSH_AUTH_SOCK does not exist." in stderr
+ When Try "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "Environment variable SSH_AUTH_SOCK does not exist." not in stderr
+
+ Scenario: Errors are reported when there are no keys in ssh-agent
+ Given ssh-agent is started at "/tmp/ssh-auth-sock" on nodes ["hanode1", "hanode2", "hanode3"]
+ When Try "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "ssh-add" in stderr
+
+ Scenario: Skip creating ssh key pairs with --use-ssh-agent
+ Given Run "mkdir ~/ssh_disabled" OK on "hanode1,hanode2,hanode3"
+ And Run "mv ~/.ssh/id_* ~/ssh_disabled" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock ssh-add ~/ssh_disabled/id_rsa" on "hanode1,hanode2,hanode3"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode2"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode3"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ # check the number of keys in authorized_keys
+ And Run "test x1 == x$(awk 'END {print NR}' ~/.ssh/authorized_keys)" OK
+ And Run "test x3 == x$(sudo awk 'END {print NR}' ~hacluster/.ssh/authorized_keys)" OK
+
+ Scenario: Skip creating ssh key pairs with --use-ssh-agent and use -N
+ Given Run "crm cluster stop" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y -N hanode2 -N hanode3" on "hanode1"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Run "test x1 == x$(awk 'END {print NR}' ~/.ssh/authorized_keys)" OK on "hanode3"
+ And Run "test x3 == x$(sudo awk 'END {print NR}' ~hacluster/.ssh/authorized_keys)" OK on "hanode3"
+
+ Scenario: crm report
+ Then Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm report /tmp/report1" OK on "hanode1"
+ Then Directory "hanode2" in "/tmp/report1.tar.bz2"
+ Then Directory "hanode3" in "/tmp/report1.tar.bz2"
+
+ Scenario: Use qnetd
+ Given Run "crm cluster stop" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init qdevice --use-ssh-agent -y --qnetd-hostname qnetd-node" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+
+ Scenario: Use qnetd with -N
+ Given Run "crm cluster stop" OK on "hanode1,hanode2"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y -N hanode2 --qnetd-hostname qnetd-node" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+
+ Scenario: GEO cluster setup with ssh-agent
+ Given Run "crm cluster stop" OK on "hanode1,hanode2"
+ And Run "systemctl disable --now booth@booth" OK on "hanode1,hanode2,hanode3"
+ And Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init -y -n cluster1 --use-ssh-agent" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on "hanode1"
+
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init -y -n cluster2 --use-ssh-agent" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on "hanode2"
+
+ When Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster geo_join -y --use-ssh-agent --cluster-node hanode1 --clusters "cluster1=@vip.0 cluster2=@vip.1"" on "hanode2"
+
+ Given Service "booth@booth" is "stopped" on "hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster geo_init_arbitrator -y --use-ssh-agent --cluster-node hanode1" on "hanode3"
+ Then Service "booth@booth" is "started" on "hanode3"
+ When Run "crm resource start g-booth" on "hanode1"
+ Then Show cluster status on "hanode1"
+ When Run "crm resource start g-booth" on "hanode2"
+ Then Show cluster status on "hanode2"
diff --git a/test/features/steps/__init__.py b/test/features/steps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/features/steps/__init__.py
diff --git a/test/features/steps/behave_agent.py b/test/features/steps/behave_agent.py
new file mode 100755
index 0000000..eafeedd
--- /dev/null
+++ b/test/features/steps/behave_agent.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+# behave_agent.py - a simple agent to execute commands remotely
+# NO AUTHENTICATION. It should only be used in behave tests.
+import io
+import os
+import pwd
+import socket
+import struct
+import subprocess
+import typing
+
+
+MSG_EOF = 0
+MSG_USER = 1
+MSG_CMD = 2
+MSG_OUT = 4
+MSG_ERR = 5
+MSG_RC = 6
+
+
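+# Wire format: each message is an 8-byte header of two big-endian int32s
+# (type, payload length; struct format '!ii'), followed by the payload bytes.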
+class Message:
+ @staticmethod
+ def write(output, type: int, data: bytes):
+ output.write(struct.pack('!ii', type, len(data)))
+ output.write(data)
+
+ @staticmethod
+ def read(input):
+ buf = input.read(8)
+ type, length = struct.unpack('!ii', buf)
+ if length > 0:
+ buf = input.read(length)
+ else:
+ buf = b''
+ return type, buf
+
+
+class SocketIO(io.RawIOBase):
+ def __init__(self, s: socket.socket):
+ self._socket = s
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return True
+
+ def read(self, __size: int = -1) -> bytes:
+ return self._socket.recv(__size)
+
+ def readinto(self, __buffer) -> int:
+ return self._socket.recv_into(__buffer)
+
+ def readall(self) -> bytes:
+ raise NotImplementedError
+
+ def write(self, __b) -> int:
+ return self._socket.send(__b)
+
+
+def call(host: str, port: int, cmdline: str, user: typing.Optional[str] = None):
+ family, type, proto, _, sockaddr = socket.getaddrinfo(host, port, type=socket.SOCK_STREAM)[0]
+ with socket.socket(family, type, proto) as s:
+ s.connect(sockaddr)
+ sout = io.BufferedWriter(SocketIO(s), 4096)
+ Message.write(sout, MSG_USER, user.encode('utf-8') if user else _getuser().encode('utf-8'))
+ Message.write(sout, MSG_CMD, cmdline.encode('utf-8'))
+ Message.write(sout, MSG_EOF, b'')
+ sout.flush()
+ s.shutdown(socket.SHUT_WR)
+ rc = None
+ stdout = []
+ stderr = []
+ sin = io.BufferedReader(SocketIO(s), 4096)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_OUT:
+ stdout.append(buf)
+ elif type == MSG_ERR:
+ stderr.append(buf)
+ elif type == MSG_RC:
+ rc, = struct.unpack('!i', buf)
+ elif type == MSG_EOF:
+ assert rc is not None
+ return rc, b''.join(stdout), b''.join(stderr)
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+
+
+def serve(stdin, stdout, stderr):
+ # This is an xinetd-style service.
+ assert os.geteuid() == 0
+ user = None
+ cmd = None
+ sin = io.BufferedReader(stdin)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_USER:
+ user = buf.decode('utf-8')
+ elif type == MSG_CMD:
+ cmd = buf.decode('utf-8')
+ elif type == MSG_EOF:
+ assert user is not None
+ assert cmd is not None
+ break
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+ if user == 'root':
+ args = ['/bin/sh']
+ else:
+ args = ['/bin/su', '-', user, '-c', '/bin/sh']
+ result = subprocess.run(
+ args,
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ sout = io.BufferedWriter(stdout)
+ Message.write(sout, MSG_RC, struct.pack('!i', result.returncode))
+ Message.write(sout, MSG_OUT, result.stdout)
+ Message.write(sout, MSG_ERR, result.stderr)
+ Message.write(sout, MSG_EOF, b'')
+ stdout.flush()
+
+
+def _getuser():
+ return pwd.getpwuid(os.geteuid()).pw_name
+
+
+if __name__ == '__main__':
+ with open(0, 'rb') as stdin, \
+ open(1, 'wb') as stdout, \
+ open(2, 'wb') as stderr:
+ serve(stdin, stdout, stderr)
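
On the client side the framing above is driven through call(). A hypothetical
invocation, assuming an agent process is listening on hanode1 (the port number
here is purely illustrative and depends on how the agent is deployed):

    import behave_agent

    rc, stdout, stderr = behave_agent.call("hanode1", 1122, "crm_mon -1", user="root")
    print(rc, stdout.decode("utf-8", errors="replace"))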
diff --git a/test/features/steps/const.py b/test/features/steps/const.py
new file mode 100644
index 0000000..3ec8845
--- /dev/null
+++ b/test/features/steps/const.py
@@ -0,0 +1,353 @@
+CRM_H_OUTPUT = '''usage: crm [-h|--help] [OPTIONS] [SUBCOMMAND ARGS...]
+or crm help SUBCOMMAND
+
+For a list of available subcommands, use crm help.
+
+Use crm without arguments for an interactive session.
+Call a subcommand directly for a "single-shot" use.
+Call crm with a level name as argument to start an interactive
+session from that level.
+
+See the crm(8) man page or call crm help for more details.
+
+positional arguments:
+ SUBCOMMAND
+
+optional arguments:
+ -h, --help show this help message and exit
+ --version show program's version number and exit
+ -f FILE, --file FILE Load commands from the given file. If a dash (-) is
+ used in place of a file name, crm will read commands
+ from the shell standard input (stdin).
+ -c CIB, --cib CIB Start the session using the given shadow CIB file.
+ Equivalent to `cib use <CIB>`.
+ -D OUTPUT_TYPE, --display OUTPUT_TYPE
+ Choose one of the output options: plain, color-always,
+ color, or uppercase. The default is color if the
+ terminal emulation supports colors, else plain.
+ -F, --force Make crm proceed with applying changes where it would
+ normally ask the user to confirm before proceeding.
+ This option is mainly useful in scripts, and should be
+ used with care.
+ -n, --no Automatically answer no when prompted
+ -w, --wait Make crm wait for the cluster transition to finish
+ (for the changes to take effect) after each processed
+ line.
+ -H DIR|FILE|SESSION, --history DIR|FILE|SESSION
+ A directory or file containing a cluster report to
+ load into history, or the name of a previously saved
+ history session.
+ -d, --debug Print verbose debugging information.
+ -R, --regression-tests
+ Enables extra verbose trace logging used by the
+ regression tests. Logs all external calls made by
+ crmsh.
+ --scriptdir DIR Extra directory where crm looks for cluster scripts,
+ or a list of directories separated by semi-colons
+ (e.g. /dir1;/dir2;etc.).
+ -X PROFILE Collect profiling data and save in PROFILE.
+ -o OPTION=VALUE, --opt OPTION=VALUE
+ Set crmsh option temporarily. If the options are saved
+ using+options save+ then the value passed here will
+ also be saved.Multiple options can be set by using
+ +-o+ multiple times.'''
+
+
+CRM_CLUSTER_INIT_H_OUTPUT = '''Initializes a new HA cluster
+
+usage: init [options] [STAGE]
+
+Initialize a cluster from scratch. This command configures
+a complete cluster, and can also add additional cluster
+nodes to the initial one-node cluster using the --nodes
+option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution, this is
+ destructive, especially those storage related
+ configurations and stages.)
+ -n NAME, --name NAME Set the name of the configured cluster.
+ -N [USER@]HOST, --node [USER@]HOST
+ The member node of the cluster. Note: the current node
+ is always get initialized during bootstrap in the
+ beginning.
+ -S, --enable-sbd Enable SBD even if no SBD device is configured
+ (diskless mode)
+ -w WATCHDOG, --watchdog WATCHDOG
+ Use the given watchdog device or driver name
+ -x, --skip-csync2-sync
+ Skip csync2 initialization (an experimental option)
+ --no-overwrite-sshkey
+ Avoid "/root/.ssh/id_rsa" overwrite if "-y" option is
+ used (False by default; Deprecated)
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs
+
+Network configuration:
+ Options for configuring the network and messaging layer.
+
+ -i IF, --interface IF
+ Bind to IP address on interface IF. Use -i second time
+ for second interface
+ -u, --unicast Configure corosync to communicate over unicast(udpu).
+ This is the default transport type
+ -U, --multicast Configure corosync to communicate over multicast.
+ Default is unicast
+ -A IP, --admin-ip IP Configure IP address as an administration virtual IP
+ -M, --multi-heartbeats
+ Configure corosync with second heartbeat line
+ -I, --ipv6 Configure corosync use IPv6
+
+QDevice configuration:
+ QDevice participates in quorum decisions. With the assistance of
+ a third-party arbitrator Qnetd, it provides votes so that a cluster
+ is able to sustain more node failures than standard quorum rules
+ allow. It is recommended for clusters with an even number of nodes
+ and highly recommended for 2 node clusters.
+
+ Options for configuring QDevice and QNetd.
+
+ --qnetd-hostname [USER@]HOST
+ User and host of the QNetd server. The host can be
+ specified in either hostname or IP address.
+ --qdevice-port PORT TCP PORT of QNetd server (default:5403)
+ --qdevice-algo ALGORITHM
+ QNetd decision ALGORITHM (ffsplit/lms,
+ default:ffsplit)
+ --qdevice-tie-breaker TIE_BREAKER
+ QNetd TIE_BREAKER (lowest/highest/valid_node_id,
+ default:lowest)
+ --qdevice-tls TLS Whether using TLS on QDevice/QNetd (on/off/required,
+ default:on)
+ --qdevice-heuristics COMMAND
+ COMMAND to run with absolute path. For multiple
+ commands, use ";" to separate (details about
+ heuristics can see man 8 corosync-qdevice)
+ --qdevice-heuristics-mode MODE
+ MODE of operation of heuristics (on/sync/off,
+ default:sync)
+
+Storage configuration:
+ Options for configuring shared storage.
+
+ -s DEVICE, --sbd-device DEVICE
+ Block device to use for SBD fencing, use ";" as
+ separator or -s multiple times for multi path (up to 3
+ devices)
+ -o DEVICE, --ocfs2-device DEVICE
+ Block device to use for OCFS2; When using Cluster LVM2
+ to manage the shared storage, user can specify one or
+ multiple raw disks, use ";" as separator or -o
+ multiple times for multi path (must specify -C option)
+ NOTE: this is a Technical Preview
+ -C, --cluster-lvm2 Use Cluster LVM2 (only valid together with -o option)
+ NOTE: this is a Technical Preview
+ -m MOUNT, --mount-point MOUNT
+ Mount point for OCFS2 device (default is
+ /srv/clusterfs, only valid together with -o option)
+ NOTE: this is a Technical Preview
+
+Stage can be one of:
+ ssh Create SSH keys for passwordless SSH between cluster nodes
+ csync2 Configure csync2
+ corosync Configure corosync
+ sbd Configure SBD (requires -s <dev>)
+ cluster Bring the cluster online
+ ocfs2 Configure OCFS2 (requires -o <dev>) NOTE: this is a Technical Preview
+ vgfs Create volume group and filesystem (ocfs2 template only,
+ requires -o <dev>) NOTE: this stage is an alias of ocfs2 stage
+ admin Create administration virtual IP (optional)
+ qdevice Configure qdevice and qnetd
+
+Note:
+ - If stage is not specified, the script will run through each stage
+ in sequence, with prompts for required information.
+
+Examples:
+ # Setup the cluster on the current node
+ crm cluster init -y
+
+ # Setup the cluster with multiple nodes
+ (NOTE: the current node will be part of the cluster even not listed in the -N option as below)
+ crm cluster init -N node1 -N node2 -N node3 -y
+
+ # Setup the cluster on the current node, with two network interfaces
+ crm cluster init -i eth1 -i eth2 -y
+
+ # Setup the cluster on the current node, with disk-based SBD
+ crm cluster init -s <share disk> -y
+
+ # Setup the cluster on the current node, with diskless SBD
+ crm cluster init -S -y
+
+ # Setup the cluster on the current node, with QDevice
+ crm cluster init --qnetd-hostname <qnetd addr> -y
+
+ # Setup the cluster on the current node, with SBD+OCFS2
+ crm cluster init -s <share disk1> -o <share disk2> -y
+
+ # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
+ crm cluster init -s <share disk1> -o <share disk2> -o <share disk3> -C -y
+
+ # Add SBD on a running cluster
+ crm cluster init sbd -s <share disk> -y
+
+ # Replace SBD device on a running cluster which already configured SBD
+ crm -F cluster init sbd -s <share disk> -y
+
+ # Add diskless SBD on a running cluster
+ crm cluster init sbd -S -y
+
+ # Add QDevice on a running cluster
+ crm cluster init qdevice --qnetd-hostname <qnetd addr> -y
+
+ # Add OCFS2+Cluster LVM on a running cluster
+ crm cluster init ocfs2 -o <share disk1> -o <share disk2> -C -y'''
+
+
+CRM_CLUSTER_JOIN_H_OUTPUT = '''Join existing cluster
+
+usage: join [options] [STAGE]
+
+Join the current node to an existing cluster. The
+current node cannot be a member of a cluster already.
+Pass any node in the existing cluster as the argument
+to the -c option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -w WATCHDOG, --watchdog WATCHDOG
+ Use the given watchdog device
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs
+
+Network configuration:
+ Options for configuring the network and messaging layer.
+
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ User and host to login to an existing cluster node.
+ The host can be specified with either a hostname or an
+ IP.
+ -i IF, --interface IF
+ Bind to IP address on interface IF. Use -i second time
+ for second interface
+
+Stage can be one of:
+ ssh Obtain SSH keys from existing cluster node (requires -c <host>)
+ csync2 Configure csync2 (requires -c <host>)
+ ssh_merge Merge root's SSH known_hosts across all nodes (csync2 must
+ already be configured).
+ cluster Start the cluster on this node
+
+If stage is not specified, each stage will be invoked in sequence.
+
+Examples:
+ # Join with a cluster node
+ crm cluster join -c <node> -y
+
+ # Join with a cluster node, with the same network interface used by that node
+ crm cluster join -c <node> -i eth1 -i eth2 -y'''
+
+
+CRM_CLUSTER_REMOVE_H_OUTPUT = '''Remove node(s) from the cluster
+
+usage: remove [options] [<node> ...]
+
+Remove one or more nodes from the cluster.
+
+This command can remove the last node in the cluster,
+thus effectively removing the whole cluster. To remove
+the last node, pass --force argument to crm or set
+the config.core.force option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c HOST, --cluster-node HOST
+ IP address or hostname of cluster node which will be
+ deleted
+ -F, --force Remove current node
+ --qdevice Remove QDevice configuration and service from cluster'''
+
+
+CRM_CLUSTER_GEO_INIT_H_OUTPUT = '''Configure cluster as geo cluster
+
+usage: geo-init [options]
+
+Create a new geo cluster with the current cluster as the
+first member. Pass the complete geo cluster topology as
+arguments to this command, and then use geo-join and
+geo-init-arbitrator to add the remaining members to
+the geo cluster.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -a [USER@]HOST, --arbitrator [USER@]HOST
+ Geo cluster arbitrator
+ -s DESC, --clusters DESC
+ Geo cluster description (see details below)
+ -t LIST, --tickets LIST
+ Tickets to create (space-separated)
+
+Cluster Description
+
+ This is a map of cluster names to IP addresses.
+ Each IP address will be configured as a virtual IP
+ representing that cluster in the geo cluster
+ configuration.
+
+ Example with two clusters named paris and amsterdam:
+
+ --clusters "paris=192.168.10.10 amsterdam=192.168.10.11"
+
+ Name clusters using the --name parameter to
+ crm bootstrap init.'''
+
+
+CRM_CLUSTER_GEO_JOIN_H_OUTPUT = '''Join cluster to existing geo cluster
+
+usage: geo-join [options]
+
+This command should be run from one of the nodes in a cluster
+which is currently not a member of a geo cluster. The geo
+cluster configuration will be fetched from the provided node,
+and the cluster will be added to the geo cluster.
+
+Note that each cluster in a geo cluster needs to have a unique
+name set. The cluster name can be set using the --name argument
+to init, or by configuring corosync with the cluster name in
+an existing cluster.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ An already-configured geo cluster or arbitrator
+ -s DESC, --clusters DESC
+ Geo cluster description (see geo-init for details)'''
+
+
+CRM_CLUSTER_GEO_INIT_ARBIT_H_OUTPUT = '''Initialize node as geo cluster arbitrator
+
+usage: geo-init-arbitrator [options]
+
+Configure the current node as a geo arbitrator. The command
+requires an existing geo cluster or geo arbitrator from which
+to get the geo cluster configuration.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ An already-configured geo cluster
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs'''
diff --git a/test/features/steps/step_implementation.py b/test/features/steps/step_implementation.py
new file mode 100644
index 0000000..74f0cc8
--- /dev/null
+++ b/test/features/steps/step_implementation.py
@@ -0,0 +1,575 @@
+import re
+import time
+import os
+import datetime
+import yaml
+
+import behave
+from behave import given, when, then
+import behave_agent
+from crmsh import corosync, sbd, userdir, bootstrap
+from crmsh import utils as crmutils
+from crmsh.sh import ShellUtils
+from utils import check_cluster_state, check_service_state, online, run_command, me, \
+ run_command_local_or_remote, file_in_archive, \
+ assert_eq, is_unclean, assert_in
+import const
+
+
+def _parse_str(text):
+ return text[1:-1].encode('utf-8').decode('unicode_escape')
+_parse_str.pattern='".*"'
+
+
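+# With the cfparse matcher, a {name:str} field uses _parse_str: it matches a
+# double-quoted string (the .pattern above), strips the quotes and unescapes
+# backslash sequences via 'unicode_escape'.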
+behave.use_step_matcher("cfparse")
+behave.register_type(str=_parse_str)
+
+
+@when('Write multi lines to file "{f}" on "{addr}"')
+def step_impl(context, f, addr):
+ data_list = context.text.split('\n')
+ for line in data_list:
+ echo_option = " -n" if line == data_list[-1] else ""
+ cmd = "echo{} \"{}\"|sudo tee -a {}".format(echo_option, line, f)
+ if addr != me():
+ sudoer = userdir.get_sudoer()
+ user = f"{sudoer}@" if sudoer else ""
+ cmd = f"ssh {user}{addr} '{cmd}'"
+ run_command(context, cmd)
+
+
+@given('Cluster service is "{state}" on "{addr}"')
+def step_impl(context, state, addr):
+ assert check_cluster_state(context, state, addr) is True
+
+
+@given('Nodes [{nodes:str+}] are cleaned up')
+def step_impl(context, nodes):
+ run_command(context, 'crm resource cleanup || true')
+ for node in nodes:
+ # wait for ssh service
+ for _ in range(10):
+ rc, _, _ = ShellUtils().get_stdout_stderr('ssh {} true'.format(node))
+ if rc == 0:
+ break
+ time.sleep(1)
+ run_command_local_or_remote(context, "crm cluster stop {} || true".format(node), node)
+ assert check_cluster_state(context, 'stopped', node) is True
+
+
+@given('Service "{name}" is "{state}" on "{addr}"')
+def step_impl(context, name, state, addr):
+ assert check_service_state(context, name, state, addr) is True
+
+
+@given('Has disk "{disk}" on "{addr}"')
+def step_impl(context, disk, addr):
+ _, out, _ = run_command_local_or_remote(context, "fdisk -l", addr)
+ assert re.search(r'{} '.format(disk), out) is not None
+
+
+@given('Online nodes are "{nodelist}"')
+def step_impl(context, nodelist):
+ assert online(context, nodelist) is True
+
+
+@given('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@then('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@when('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@given('IP "{addr}" is belong to "{iface}"')
+def step_impl(context, addr, iface):
+ cmd = 'ip address show dev {}'.format(iface)
+ res = re.search(r' {}/'.format(addr), run_command(context, cmd)[1])
+ assert bool(res) is True
+
+
+@given('Run "{cmd}" OK on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr, True)
+
+@when('Run "{cmd}" on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr)
+
+
+@then('Run "{cmd}" OK on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr)
+
+
+@then('Print stdout')
+def step_impl(context):
+ context.logger.info("\n{}".format(context.stdout))
+
+
+@then('Print stderr')
+def step_impl(context):
+ context.logger.info("\n{}".format(context.stderr))
+
+
+@then('No crmsh tracebacks')
+def step_impl(context):
+ if "Traceback (most recent call last):" in context.stderr and \
+ re.search('File "/usr/lib/python.*/crmsh/', context.stderr):
+ context.logger.info("\n{}".format(context.stderr))
+ context.failed = True
+
+
+@when('Try "{cmd}" on "{addr}"')
+def step_impl(context, cmd, addr):
+ run_command_local_or_remote(context, cmd, addr, exit_on_fail=False)
+
+
+@when('Try "{cmd}"')
+def step_impl(context, cmd):
+ _, out, _ = run_command(context, cmd, exit_on_fail=False)
+
+
+@when('Wait "{second}" seconds')
+def step_impl(context, second):
+ time.sleep(int(second))
+
+
+@then('Got output "{msg}"')
+def step_impl(context, msg):
+ assert context.stdout == msg
+ context.stdout = None
+
+
+@then('Expected multiple lines')
+def step_impl(context):
+ assert context.stdout == context.text
+ context.stdout = None
+
+
+@then('Expected "{msg}" in stdout')
+def step_impl(context, msg):
+ assert_in(msg, context.stdout)
+ context.stdout = None
+
+
+@then('Expected "{msg}" in stderr')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Expected regrex "{reg_str}" in stdout')
+def step_impl(context, reg_str):
+ res = re.search(reg_str, context.stdout)
+ assert res is not None
+ context.stdout = None
+
+
+@then('Expected return code is "{num}"')
+def step_impl(context, num):
+ assert context.return_code == int(num)
+
+
+@then('Expected "{msg}" not in stdout')
+def step_impl(context, msg):
+ assert msg not in context.stdout
+ context.stdout = None
+
+
+@then('Expected "{msg}" not in stderr')
+def step_impl(context, msg):
+ assert context.stderr is None or msg not in context.stderr
+ context.stderr = None
+
+
+@then('Except "{msg}"')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Except multiple lines')
+def step_impl(context):
+ assert_in(context.text, context.stderr)
+ context.stderr = None
+
+
+@then('Expected multiple lines in output')
+def step_impl(context):
+ assert_in(context.text, context.stdout)
+ context.stdout = None
+
+
+@then('Except "{msg}" in stderr')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Cluster service is "{state}" on "{addr}"')
+def step_impl(context, state, addr):
+ assert check_cluster_state(context, state, addr) is True
+
+
+@then('Service "{name}" is "{state}" on "{addr}"')
+def step_impl(context, name, state, addr):
+ assert check_service_state(context, name, state, addr) is True
+
+
+@then('Online nodes are "{nodelist}"')
+def step_impl(context, nodelist):
+ assert online(context, nodelist) is True
+
+
+@then('Node "{node}" is standby')
+def step_impl(context, node):
+ assert crmutils.is_standby(node) is True
+
+
+@then('Node "{node}" is online')
+def step_impl(context, node):
+ assert crmutils.is_standby(node) is False
+
+
+@then('IP "{addr}" is used by corosync on "{node}"')
+def step_impl(context, addr, node):
+ _, out, _ = run_command_local_or_remote(context, 'corosync-cfgtool -s', node)
+ res = re.search(r' {}\n'.format(addr), out)
+ assert bool(res) is True
+
+
+@then('Cluster name is "{name}"')
+def step_impl(context, name):
+ _, out, _ = run_command(context, 'corosync-cmapctl -b totem.cluster_name')
+ assert out.split()[-1] == name
+
+
+@then('Cluster virtual IP is "{addr}"')
+def step_impl(context, addr):
+ _, out, _ = run_command(context, 'crm configure show|grep -A1 IPaddr2')
+ res = re.search(r' ip={}'.format(addr), out)
+ assert bool(res) is True
+
+
+@then('Cluster is using udpu transport mode')
+def step_impl(context):
+ assert corosync.get_value('totem.transport') == 'udpu'
+
+
+@then('Show cluster status on "{addr}"')
+def step_impl(context, addr):
+ _, out, _ = run_command_local_or_remote(context, 'crm_mon -1', addr)
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show corosync ring status')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status ring')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show crm configure')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm configure show')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show status from qnetd')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status qnetd')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show qdevice status')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status qdevice')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show corosync qdevice configuration')
+def step_impl(context):
+ _, out, _ = run_command(context, "sed -n -e '/quorum/,/^}/ p' /etc/corosync/corosync.conf")
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Resource "{res}" type "{res_type}" is "{state}"')
+def step_impl(context, res, res_type, state):
+ try_count = 0
+ result = None
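+    # crm_mon reflects resource state changes asynchronously; poll up to ~20s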
+ while try_count < 20:
+ time.sleep(1)
+ _, out, _ = run_command(context, "crm_mon -1rR")
+ if out:
+ result = re.search(r'\s{}\s+.*:+{}\):\s+{} '.format(res, res_type, state), out)
+ if not result:
+ try_count += 1
+ else:
+ break
+ assert result is not None
+
+
+@then('Resource "{res}" failcount on "{node}" is "{number}"')
+def step_impl(context, res, node, number):
+ cmd = "crm resource failcount {} show {}".format(res, node)
+ _, out, _ = run_command(context, cmd)
+ if out:
+ result = re.search(r'name=fail-count-{} value={}'.format(res, number), out)
+ assert result is not None
+
+
+@then('Resource "{res_type}" not configured')
+def step_impl(context, res_type):
+ _, out, _ = run_command(context, "crm configure show")
+ result = re.search(r' {} '.format(res_type), out)
+ assert result is None
+
+
+@then('Output is the same with expected "{cmd}" help output')
+def step_impl(context, cmd):
+ cmd_help = {}
+ cmd_help["crm"] = const.CRM_H_OUTPUT
+ cmd_help["crm_cluster_init"] = const.CRM_CLUSTER_INIT_H_OUTPUT
+ cmd_help["crm_cluster_join"] = const.CRM_CLUSTER_JOIN_H_OUTPUT
+ cmd_help["crm_cluster_remove"] = const.CRM_CLUSTER_REMOVE_H_OUTPUT
+ cmd_help["crm_cluster_geo-init"] = const.CRM_CLUSTER_GEO_INIT_H_OUTPUT
+ cmd_help["crm_cluster_geo-join"] = const.CRM_CLUSTER_GEO_JOIN_H_OUTPUT
+ cmd_help["crm_cluster_geo-init-arbitrator"] = const.CRM_CLUSTER_GEO_INIT_ARBIT_H_OUTPUT
+ key = '_'.join(cmd.split())
+ assert_eq(cmd_help[key], context.stdout)
+
+
+@then('Corosync working on "{transport_type}" mode')
+def step_impl(context, transport_type):
+ if transport_type == "multicast":
+ assert corosync.get_value("totem.transport") is None
+ if transport_type == "unicast":
+ assert_eq("udpu", corosync.get_value("totem.transport"))
+
+
+@then('Expected votes will be "{votes}"')
+def step_impl(context, votes):
+ assert_eq(int(votes), int(corosync.get_value("quorum.expected_votes")))
+
+
+@then('Directory "{directory}" created')
+def step_impl(context, directory):
+ assert os.path.isdir(directory) is True
+
+
+@then('Directory "{directory}" not created')
+def step_impl(context, directory):
+ assert os.path.isdir(directory) is False
+
+
+@then('Default crm_report tar file created')
+def step_impl(context):
+ default_file_name = 'crm_report-{}.tar.bz2'.format(datetime.datetime.now().strftime("%a-%d-%b-%Y"))
+ assert os.path.exists(default_file_name) is True
+
+
+@when('Remove default crm_report tar file')
+def step_impl(context):
+ default_file_name = 'crm_report-{}.tar.bz2'.format(datetime.datetime.now().strftime("%a-%d-%b-%Y"))
+ os.remove(default_file_name)
+
+
+@then('File "{f}" in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is True
+
+
+@then('Directory "{f}" in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is True
+
+
+@then('File "{f}" not in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is False
+
+
+@then('File "{f}" was synced in cluster')
+def step_impl(context, f):
+ cmd = "crm cluster diff {}".format(f)
+ rc, out, _ = run_command(context, cmd)
+ assert_eq("", out)
+
+
+@given('Resource "{res_id}" is started on "{node}"')
+def step_impl(context, res_id, node):
+ rc, out, err = ShellUtils().get_stdout_stderr("crm_mon -1")
+ assert re.search(r'\*\s+{}\s+.*Started\s+{}'.format(res_id, node), out) is not None
+
+
+@then('Resource "{res_id}" is started on "{node}"')
+def step_impl(context, res_id, node):
+ rc, out, err = ShellUtils().get_stdout_stderr("crm_mon -1")
+ assert re.search(r'\*\s+{}\s+.*Started\s+{}'.format(res_id, node), out) is not None
+
+
+@then('SBD option "{key}" value is "{value}"')
+def step_impl(context, key, value):
+ res = sbd.SBDManager.get_sbd_value_from_config(key)
+ assert_eq(value, res)
+
+
+@then('SBD option "{key}" value for "{dev}" is "{value}"')
+def step_impl(context, key, dev, value):
+ res = sbd.SBDTimeout.get_sbd_msgwait(dev)
+ assert_eq(int(value), res)
+
+
+@then('Cluster property "{key}" is "{value}"')
+def step_impl(context, key, value):
+ res = crmutils.get_property(key)
+ assert res is not None
+ assert_eq(value, str(res))
+
+
+@then('Property "{key}" in "{type}" is "{value}"')
+def step_impl(context, key, type, value):
+ res = crmutils.get_property(key, type)
+ assert res is not None
+ assert_eq(value, str(res))
+
+
+@then('Parameter "{param_name}" not configured in "{res_id}"')
+def step_impl(context, param_name, res_id):
+ _, out, _ = run_command(context, "crm configure show {}".format(res_id))
+ result = re.search("params {}=".format(param_name), out)
+ assert result is None
+
+
+@then('Parameter "{param_name}" configured in "{res_id}"')
+def step_impl(context, param_name, res_id):
+ _, out, _ = run_command(context, "crm configure show {}".format(res_id))
+ result = re.search("params {}=".format(param_name), out)
+ assert result is not None
+
+
+@given('Yaml "{path}" value is "{value}"')
+def step_impl(context, path, value):
+ yaml_file = "/etc/crm/profiles.yml"
+ with open(yaml_file) as f:
+ data = yaml.load(f, Loader=yaml.SafeLoader)
+ sec_name, key = path.split(':')
+ assert_eq(str(value), str(data[sec_name][key]))
+
+
+@when('Wait for DC')
+def step_impl(context):
+ while True:
+ time.sleep(1)
+ if crmutils.get_dc():
+ break
+
+
+@then('File "{path}" exists on "{node}"')
+def step_impl(context, path, node):
+ rc, _, stderr = behave_agent.call(node, 1122, 'test -f {}'.format(path), user='root')
+ assert rc == 0
+
+
+@then('File "{path}" not exist on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ ! -f {} ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Directory "{path}" is empty on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ ! "$(ls -A {})" ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Directory "{path}" not empty on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ "$(ls -A {})" ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Node "{node}" is UNCLEAN')
+def step_impl(context, node):
+ assert is_unclean(node) is True
+
+
+@then('Wait "{count}" seconds for "{node}" successfully fenced')
+def step_impl(context, count, node):
+    # behave ignores a step's return value, so fail with an assertion instead
+    index = 0
+    while index <= int(count):
+        rc, out, _ = ShellUtils().get_stdout_stderr("stonith_admin -h {}".format(node))
+        if "Node {} last fenced at:".format(node) in out:
+            return
+        time.sleep(1)
+        index += 1
+    assert False, "Node {} was not fenced within {} seconds".format(node, count)
+
+@then('Check passwordless for hacluster between "{nodelist}"')
+def step_impl(context, nodelist):
+ if userdir.getuser() != 'root' or userdir.get_sudoer():
+ return True
+ failed = False
+ nodes = nodelist.split()
+ for i in range(0, len(nodes)):
+ for j in range(i + 1, len(nodes)):
+ rc, _, _ = behave_agent.call(
+ nodes[i], 1122,
+ f'ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 {nodes[j]} true',
+ user='hacluster',
+ )
+ if rc != 0:
+ failed = True
+ context.logger.error(f"There is no passwordless configured from {nodes[i]} to {nodes[j]} under 'hacluster'")
+ assert not failed
+
+
+@then('Check user shell for hacluster between "{nodelist}"')
+def step_impl(context, nodelist):
+ if userdir.getuser() != 'root' or userdir.get_sudoer():
+ return True
+ for node in nodelist.split():
+ if node == me():
+ assert bootstrap.is_nologin('hacluster') is False
+ else:
+ assert bootstrap.is_nologin('hacluster', node) is False
+
+
+@given('ssh-agent is started at "{path}" on nodes [{nodes:str+}]')
+def step_impl(context, path, nodes):
+ user = userdir.get_sudoer()
+ if not user:
+ user = userdir.getuser()
+ for node in nodes:
+ rc, _, _ = behave_agent.call(node, 1122, f"systemd-run --uid '{user}' -u ssh-agent /usr/bin/ssh-agent -D -a '{path}'", user='root')
+ assert 0 == rc
+
+
+@then('This file "{target_file}" will trigger UnicodeDecodeError exception')
+def step_impl(context, target_file):
+    # behave ignores a step's return value, so fail with an assertion instead
+    try:
+        with open(target_file, "r", encoding="utf-8") as file:
+            file.read()
+    except UnicodeDecodeError:
+        pass
+    else:
+        assert False, "reading {} did not raise UnicodeDecodeError".format(target_file)
diff --git a/test/features/steps/utils.py b/test/features/steps/utils.py
new file mode 100644
index 0000000..675c2c4
--- /dev/null
+++ b/test/features/steps/utils.py
@@ -0,0 +1,177 @@
+import concurrent.futures
+import difflib
+import tarfile
+import glob
+import os
+import re
+import socket
+from crmsh import utils, userdir
+from crmsh.sh import ShellUtils
+import behave_agent
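+
+# NOTE: behave_agent is assumed to listen on port 1122 on every test node
+# (started by the test harness); remote commands are dispatched through it.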
+
+
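+# Matches ANSI color escape sequences (e.g. \x1b[31m) so that command output
+# can be compared as plain text.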
+COLOR_MODE = r'\x1b\[[0-9]+m'
+
+
+def get_file_type(file_path):
+ rc, out, _ = ShellUtils().get_stdout_stderr("file {}".format(file_path))
+ if re.search(r'{}: bzip2'.format(file_path), out):
+ return "bzip2"
+ if re.search(r'{}: directory'.format(file_path), out):
+ return "directory"
+
+
+def get_all_files(archive_path):
+ archive_type = get_file_type(archive_path)
+ if archive_type == "bzip2":
+ with tarfile.open(archive_path) as tar:
+ return tar.getnames()
+ if archive_type == "directory":
+ all_files = glob.glob("{}/*".format(archive_path)) + glob.glob("{}/*/*".format(archive_path))
+ return all_files
+
+
+def file_in_archive(f, archive_path):
+ for item in get_all_files(archive_path):
+ if re.search(r'/{}$'.format(f), item):
+ return True
+ return False
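+
+
+# Example (archive name illustrative): file_in_archive("corosync.conf",
+# "crm_report-Mon-01-Jan-2024.tar.bz2") is True when some member path ends
+# with "/corosync.conf".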
+
+
+def me():
+ return socket.gethostname()
+
+
+def _wrap_cmd_non_root(cmd):
+ """
+    When the current user is a sudoer or is not root, prefix the host
+    arguments of "crm cluster join/init" commands (options such as -c, -N,
+    --qnetd-hostname, --cluster-node, --arbitrator and --node) with '<user>@'
+ """
+ sudoer = userdir.get_sudoer()
+ current_user = userdir.getuser()
+ if sudoer:
+ user = sudoer
+ elif current_user != 'root':
+ user = current_user
+ else:
+ return cmd
+    if re.search('cluster (?:join|geo_join|geo_init_arbitrator)', cmd) and "@" not in cmd:
+ cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node|--arbitrator)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
+ elif "cluster init" in cmd and ("-N" in cmd or "--qnetd-hostname" in cmd) and "@" not in cmd:
+ cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
+ elif "cluster init" in cmd and "--node" in cmd and "@" not in cmd:
+ search_patt = r"--node [\'\"](.*)[\'\"]"
+ res = re.search(search_patt, cmd)
+ if res:
+ node_str = ' '.join([f"{user}@{n}" for n in res.group(1).split()])
+ cmd = re.sub(search_patt, f"--node '{node_str}'", cmd)
+ return cmd
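+
+
+# For illustration, assuming the sudoer is "alice", _wrap_cmd_non_root turns
+#   crm cluster join -c hanode1 -y
+# into
+#   crm cluster join -c alice@hanode1 -y
+# so the join runs over ssh as a non-root user.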
+
+
+def run_command(context, cmd, exit_on_fail=True):
+ cmd = _wrap_cmd_non_root(cmd)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ context.return_code = rc
+ if out:
+ out = re.sub(COLOR_MODE, '', out)
+ context.stdout = out
+ if err:
+ err = re.sub(COLOR_MODE, '', err)
+ context.stderr = err
+ if rc != 0 and exit_on_fail:
+ if out:
+ context.logger.info("\n{}\n".format(out))
+ context.logger.error("\n{}\n".format(err))
+ context.failed = True
+ return rc, out, err
+
+
+def run_command_local_or_remote(context, cmd, addr, exit_on_fail=True):
+ if addr == me():
+ return run_command(context, cmd, exit_on_fail)
+ cmd = _wrap_cmd_non_root(cmd)
+ sudoer = userdir.get_sudoer()
+ if sudoer is None:
+ user = None
+ else:
+ user = sudoer
+ cmd = f'sudo {cmd}'
+ hosts = addr.split(',')
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(hosts)) as executor:
+ results = list(executor.map(lambda x: (x, behave_agent.call(x, 1122, cmd, user=user)), hosts))
+ out = utils.to_ascii(results[0][1][1])
+ err = utils.to_ascii(results[0][1][2])
+ context.stdout = out
+ context.stderr = err
+ context.return_code = 0
+ for host, (rc, stdout, stderr) in results:
+ if rc != 0:
+ err = re.sub(COLOR_MODE, '', utils.to_ascii(stderr))
+ context.stderr = err
+ if exit_on_fail:
+                context.logger.error("Failed to run %s on %s@%s :%s", cmd, os.geteuid(), host, err)
+ raise ValueError("{}".format(err))
+ else:
+ return
+ return 0, out, err
+
+
+def check_service_state(context, service_name, state, addr):
+ if state in {'enabled', 'disabled'}:
+ rc, _, _ = behave_agent.call(addr, 1122, f'systemctl is-enabled {service_name}', 'root')
+ return (state == 'enabled') == (rc == 0)
+ elif state in {'started', 'stopped'}:
+ rc, _, _ = behave_agent.call(addr, 1122, f'systemctl is-active {service_name}', 'root')
+ return (state == 'started') == (rc == 0)
+ else:
+ context.logger.error("\nService state should be \"started/stopped/enabled/disabled\"\n")
+ raise ValueError("Service state should be \"started/stopped/enabled/disabled\"")
+
+
+def check_cluster_state(context, state, addr):
+ return check_service_state(context, 'pacemaker.service', state, addr)
+
+
+def is_unclean(node):
+ rc, out, err = ShellUtils().get_stdout_stderr("crm_mon -1")
+ return "{}: UNCLEAN".format(node) in out
+
+
+def online(context, nodelist):
+ rc = True
+ _, out = ShellUtils().get_stdout("sudo crm_node -l")
+ for node in nodelist.split():
+ node_info = "{} member".format(node)
+        if node_info not in out:
+ rc = False
+ context.logger.error("\nNode \"{}\" not online\n".format(node))
+ return rc
+
+def assert_eq(expected, actual):
+ if expected != actual:
+ msg = "\033[32m" "Expected" "\033[31m" " != Actual" "\033[0m" "\n" \
+ "\033[32m" "Expected:" "\033[0m" " {}\n" \
+ "\033[31m" "Actual:" "\033[0m" " {}".format(expected, actual)
+ if isinstance(expected, str) and '\n' in expected:
+ try:
+ diff = '\n'.join(difflib.unified_diff(
+ expected.splitlines(),
+ actual.splitlines(),
+ fromfile="expected",
+ tofile="actual",
+ lineterm="",
+ ))
+ msg = "{}\n" "\033[31m" "Diff:" "\033[0m" "\n{}".format(msg, diff)
+ except Exception:
+ pass
+ raise AssertionError(msg)
+
+def assert_in(expected, actual):
+ if expected not in actual:
+ msg = "\033[32m" "Expected" "\033[31m" " not in Actual" "\033[0m" "\n" \
+ "\033[32m" "Expected:" "\033[0m" " {}\n" \
+ "\033[31m" "Actual:" "\033[0m" " {}".format(expected, actual)
+ raise AssertionError(msg)
diff --git a/test/features/user_access.feature b/test/features/user_access.feature
new file mode 100644
index 0000000..180dd3f
--- /dev/null
+++ b/test/features/user_access.feature
@@ -0,0 +1,114 @@
+@user
+Feature: Functional test for user access
+
+ Need nodes: hanode1
+
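+  # "-g 90" in the first scenario assumes gid 90 is the haclient group
+  # provided by the test image.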
+ Scenario: User in haclient group
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash -N -g 90 xin1" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~xin1/.bashrc" on "hanode1"
+ When Try "su - xin1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ ERROR: Please run this command starting with "sudo".
+ Currently, this command needs to use sudo to escalate itself as root.
+ Please consider to add "xin1" as sudoer. For example:
+ sudo bash -c 'echo "xin1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin1'
+ """
+ When Run "echo "xin1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin1" on "hanode1"
+ When Try "su - xin1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin1 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Run "su - xin1 -c 'crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ @clean
+ Scenario: User in sudoer
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin3" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~xin3/.bashrc" on "hanode1"
+ And Run "echo "xin3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin3" on "hanode1"
+ When Try "su - xin3 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin3 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Try "su - xin3 -c 'crm node standby hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin3 -c 'sudo crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ @clean
+ Scenario: Normal user access
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash user1" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user1/.bashrc" on "hanode1"
+ When Try "su - user1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo".
+ Currently, this command needs to use sudo to escalate itself as root.
+ Please consider to add "user1" as sudoer. For example:
+ sudo bash -c 'echo "user1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user1'
+ """
+ When Run "echo "user1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user1" on "hanode1"
+ When Try "su - user1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - user1 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Run "useradd -m -s /bin/bash user2" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user2/.bashrc" on "hanode1"
+ When Try "su - user2 -c 'crm node standby hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: This command needs higher privilege.
+ Option 1) Please consider to add "user2" as sudoer. For example:
+ sudo bash -c 'echo "user2 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user2'
+ Option 2) Add "user2" to the haclient group. For example:
+ sudo usermod -g haclient user2
+ """
+ When Run "usermod -g haclient user2" on "hanode1"
+ When Run "su - user2 -c 'crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ When Run "useradd -m -s /bin/bash user3" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user3/.bashrc" on "hanode1"
+ When Try "su - user3 -c 'crm node online hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: This command needs higher privilege.
+ Option 1) Please consider to add "user3" as sudoer. For example:
+ sudo bash -c 'echo "user3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user3'
+ Option 2) Add "user3" to the haclient group. For example:
+ sudo usermod -g haclient user3
+ """
+ When Run "echo "user3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user3" on "hanode1"
+ When Try "su - user3 -c 'crm node online hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - user3 -c 'sudo crm node online hanode1'" on "hanode1"
+ Then Node "hanode1" is online
diff --git a/test/history-test.tar.bz2 b/test/history-test.tar.bz2
new file mode 100644
index 0000000..38b37d0
--- /dev/null
+++ b/test/history-test.tar.bz2
Binary files differ
diff --git a/test/list-undocumented-commands.py b/test/list-undocumented-commands.py
new file mode 100755
index 0000000..4729b48
--- /dev/null
+++ b/test/list-undocumented-commands.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python3
+#
+# Script to discover and report undocumented commands.
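+#
+# Run it from the top of the crmsh source tree so that the relative
+# doc/crm.8.adoc path below resolves, e.g.:
+#   python3 test/list-undocumented-commands.py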
+
+from crmsh.ui_root import Root
+from crmsh import help
+
+help.HELP_FILE = "doc/crm.8.adoc"
+help._load_help()
+
+_IGNORED_COMMANDS = ('help', 'quit', 'cd', 'up', 'ls')
+
+
+def check_help(ui):
+ for name, child in ui.children().items():
+ if child.type == 'command':
+ try:
+ h = help.help_command(ui.name, name)
+ if h.generated and name not in _IGNORED_COMMANDS:
+ print("Undocumented: %s %s" % (ui.name, name))
+            except Exception:
+ print("Undocumented: %s %s" % (ui.name, name))
+ elif child.type == 'level':
+ h = help.help_level(name)
+ if h.generated:
+ print("Undocumented: %s %s" % (ui.name, name))
+ check_help(child.level)
+
+check_help(Root())
diff --git a/test/profile-history.sh b/test/profile-history.sh
new file mode 100755
index 0000000..02831f8
--- /dev/null
+++ b/test/profile-history.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
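+# Wrapper around Python's pstats for profile files produced by "crm -X FILE"
+# (file names below are illustrative):
+#   ./profile-history.sh cumulative crm.prof    # sort by cumulative time
+#   ./profile-history.sh time crm.prof          # sort by internal time
+#   ./profile-history.sh callers crm.prof FUNC  # show callers of FUNC
+#   ./profile-history.sh crm.prof history.tar.bz2  # profile "crm history log"
+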
+case $1 in
+ cumulative)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"cumulative\").print_stats()" | less
+ ;;
+ time)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\").print_stats()" | less
+ ;;
+ timecum)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\", \"cum\").print_stats()" | less
+ ;;
+ callers)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.print_callers(.5, \"$3\")" | less
+ ;;
+ verbose)
+ PYTHONPATH=. ./crm -X "$2" -H "$3" history log
+ ;;
+ *)
+ PYTHONPATH=. ./crm -X "$1" -H "$2" history log >/dev/null
+ ;;
+esac
diff --git a/test/regression.sh b/test/regression.sh
new file mode 100755
index 0000000..ec1a416
--- /dev/null
+++ b/test/regression.sh
@@ -0,0 +1,199 @@
+#!/bin/sh
+# Copyright (C) 2007 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+rootdir="$(dirname "$0")"
+TESTDIR=${TESTDIR:-$rootdir/testcases}
+DFLT_TESTSET=basicset
+OUTDIR=${OUTDIR:-crmtestout}
+CRM_OUTF="$OUTDIR/crm.out"
+CRM_LOGF="$OUTDIR/crm.log"
+CRM_DEBUGF="$OUTDIR/crm.debug"
+OUTF="$OUTDIR/regression.out"
+DIFF_OPTS="--ignore-all-space -U 1"
+common_filter=$TESTDIR/common.filter
+common_exclf=$TESTDIR/common.excl
+export OUTDIR
+
+logmsg() {
+ echo "$(date): $*" | tee -a "$CRM_DEBUGF" | tee -a "$CRM_LOGF"
+}
+abspath() {
+ echo "$1" | grep -qs "^/" && echo "$1" || echo "$(pwd)/$1"
+}
+
+usage() {
+ cat<<EOF
+
+usage: $0 [-q] [-P] [testcase...|set:testset]
+
+Test crm shell using supplied testcases. If none are given,
+set:basicset is used. All testcases and sets are in testcases/.
+See also README.regression for description.
+
+-q: quiet operation (no progress shown)
+-P: profile test
+
+EOF
+exit 2
+}
+
+if [ ! -d "$TESTDIR" ]; then
+ echo "$0: $TESTDIR does not exit"
+ usage
+fi
+
+rm -f "$CRM_LOGF" "$CRM_DEBUGF"
+
+# make tools/lrmd/stonithd log to our files only
+HA_logfile="$(abspath "$CRM_LOGF")"
+HA_debugfile="$(abspath "$CRM_DEBUGF")"
+HA_use_logd=no
+HA_logfacility=""
+export HA_logfile HA_debugfile HA_use_logd HA_logfacility
+
+mkdir -p "$OUTDIR"
+. /etc/ha.d/shellfuncs
+
+args="$(getopt hqPc:p:m: "$*")"
+[ $? -ne 0 ] && usage
+eval set -- "$args"
+
+output_mode="normal"
+while [ x"$1" != x ]; do
+ case "$1" in
+ -h) usage;;
+ -m) output_mode=$2; shift 1;;
+ -q) output_mode="silent";;
+ -P) do_profile=1;;
+ -c) CRM=$2; export CRM; shift 1;;
+ -p) PATH="$2:$PATH"; export PATH; shift 1;;
+ --) shift 1; break;;
+ *) usage;;
+ esac
+ shift 1
+done
+
+exec >"$OUTF" 2>&1
+
+# Where to send user output
+# evaltest.sh also uses >&3 for printing progress dots
+case $output_mode in
+ silent) exec 3>/dev/null;;
+ buildbot) exec 3>"$CRM_OUTF";;
+ *) exec 3>/dev/tty;;
+esac
+
+setenvironment() {
+ filterf=$TESTDIR/$testcase.filter
+ pref=$TESTDIR/$testcase.pre
+ postf=$TESTDIR/$testcase.post
+ exclf=$TESTDIR/$testcase.excl
+ log_filter=$TESTDIR/$testcase.log_filter
+ expf=$TESTDIR/$testcase.exp
+ outf=$OUTDIR/$testcase.out
+ difff=$OUTDIR/$testcase.diff
+}
+
+filter_output() {
+ { [ -x $common_filter ] && $common_filter || cat;} |
+ { [ -f $common_exclf ] && egrep -vf $common_exclf || cat;} |
+ { [ -x $filterf ] && $filterf || cat;} |
+ { [ -f $exclf ] && egrep -vf $exclf || cat;}
+}
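+
+# Example: for testcase "acl" the output passes through common.filter and
+# common.excl, then acl.filter and acl.excl (each applied only when present)
+# before being diffed against acl.exp.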
+
+dumpcase() {
+ cat<<EOF
+----------
+testcase $testcase failed
+output is in $outf
+diff (from $difff):
+`cat $difff`
+----------
+EOF
+}
+
+runtestcase() {
+ setenvironment
+ (
+ cd $rootdir
+ [ -x "$pref" ] && $pref >/dev/null 2>&1
+ )
+ echo -n "$testcase" >&3
+ logmsg "BEGIN testcase $testcase"
+ (
+ cd $rootdir
+ ./evaltest.sh $testargs
+ ) < $TESTDIR/$testcase > $outf 2>&1
+
+ perl -pi -e 's/\<cib[^>]*\>/\<cib\>/g' $outf
+
+ filter_output < $outf |
+ if [ "$prepare" ]; then
+ echo " saving to expect file" >&3
+ cat > $expf
+ else
+ (
+ cd $rootdir
+ [ -x "$postf" ] && $postf >/dev/null 2>&1
+ )
+ echo -n " checking..." >&3
+ if head -2 $expf | grep -qs '^<cib'; then
+ crm_diff -o $expf -n -
+ else
+ diff $DIFF_OPTS $expf -
+ fi > $difff
+ if [ $? -ne 0 ]; then
+ echo " FAIL" >&3
+ cat $difff >&3
+ dumpcase
+ return 1
+ else
+ echo " PASS" >&3
+ rm -f $outf $difff
+ fi
+ fi
+ sed -n "/BEGIN testcase $testcase/,\$p" $CRM_LOGF |
+ { [ -x $log_filter ] && $log_filter || cat;} |
+ egrep '(CRIT|ERROR):'
+ logmsg "END testcase $testcase"
+}
+
+[ "$1" = prepare ] && { prepare=1; shift 1;}
+[ $# -eq 0 ] && set "set:$DFLT_TESTSET"
+testargs=""
+if [ -n "$do_profile" ]; then
+ if echo $1 | grep -qs '^set:'; then
+ echo you can profile just one test
+ echo 'really!'
+ exit 1
+ fi
+ testargs="prof"
+fi
+
+for a; do
+ if [ "$a" ] && [ -f "$TESTDIR/$a" ]; then
+ testcase=$a
+ runtestcase
+ elif echo "$a" | grep -q "^set:"; then
+ TESTSET="$TESTDIR/$(echo $a | sed 's/set://')"
+ if [ -f "$TESTSET" ]; then
+ while read -r testcase; do
+ runtestcase
+ done < "$TESTSET"
+ else
+ echo "testset $TESTSET does not exist" >&3
+ fi
+ else
+ echo "test $TESTDIR/$a does not exist" >&3
+ fi
+done
+
+res=`grep -E -wv '(BEGIN|END) testcase|warning: stray .* before' "$OUTF"`
+if [ -n "$res" ];then
+ echo "The failed messages: $res"
+ echo "seems like some tests failed or else something not expected"
+ echo "check $OUTF and diff files in $OUTDIR"
+ echo "in case you wonder what lrmd was doing, read $(abspath "$CRM_LOGF") and $(abspath "$CRM_DEBUGF")"
+ exit 1
+fi >&3
diff --git a/test/run-functional-tests b/test/run-functional-tests
new file mode 100755
index 0000000..5c3bca7
--- /dev/null
+++ b/test/run-functional-tests
@@ -0,0 +1,551 @@
+#!/bin/bash
+DOCKER_IMAGE=${DOCKER_IMAGE:-"nyang23/haleap:15.5"}
+PROJECT_PATH=$(dirname $(dirname `realpath $0`))
+PROJECT_INSIDE="/opt/crmsh"
+DOCKER_SERVICE="docker.service"
+COROSYNC_CONF="/etc/corosync/corosync.conf"
+COROSYNC_AUTH="/etc/corosync/authkey"
+HA_NETWORK_FIRST="ha_network_first"
+HA_NETWORK_SECOND="ha_network_second"
+declare -a HA_NETWORK_ARRAY
+declare -a HA_NETWORK_V6_ARRAY
+HA_NETWORK_ARRAY[0]=$HA_NETWORK_FIRST
+HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
+HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
+HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
+BEHAVE_CASE_DIR="$(dirname $0)/features/"
+BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+
+read -r -d '' SSHD_CONFIG_AZURE << EOM
+PermitRootLogin no
+AuthorizedKeysFile .ssh/authorized_keys
+ChallengeResponseAuthentication no
+UsePAM yes
+X11Forwarding yes
+ClientAliveInterval 180
+Subsystem sftp /usr/lib/ssh/sftp-server
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL
+PasswordAuthentication no
+EOM
+
+read -r -d '' COROSYNC_CONF_TEMPLATE << EOM
+totem {
+ version: 2
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ interface {
+ ringnumber: 0
+ mcastport: 5405
+ ttl: 1
+ }
+
+ transport: udpu
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ token: 5000
+ join: 60
+ max_messages: 20
+ token_retransmits_before_loss_const: 10
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+nodelist {
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
+EOM
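+
+# The empty nodelist above is a placeholder; config_cluster() appends one
+# node stanza per container and adds "two_node: 1" for two-node clusters.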
+
+
+fatal() {
+ error $*
+ exit 1
+}
+
+
+error() {
+ echo "ERROR: $*"
+}
+
+
+warning() {
+ echo "WARNING: $*"
+}
+
+
+info() {
+ echo "INFO: $*"
+}
+
+
+is_number() {
+ num=$1
+ test ! -z "$num" && test "$num" -eq "$num" 2> /dev/null && test "$num" -gt 0 2> /dev/null
+}
+
+
+check_docker_env() {
+ # check if docker available
+ systemctl list-unit-files $DOCKER_SERVICE &> /dev/null
+ if [ "$?" -ne 0 ];then
+ fatal "$DOCKER_SERVICE is not available"
+ fi
+ # check if docker.service started
+ systemctl is-active $DOCKER_SERVICE &> /dev/null
+ if [ "$?" -ne 0 ];then
+ fatal "$DOCKER_SERVICE is not active"
+ fi
+ # check if docker cgroup driver is systemd
+ docker info 2> /dev/null|grep -q "Cgroup Driver: systemd"
+ if [ "$?" -ne 0 ];then
+ warning "docker cgroup driver suggest to be \"systemd\""
+ fi
+
+ [ "$1" == "cleanup" ] && return
+ # check if ha network already exists
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network ls|grep -q "$network"
+ if [ "$?" -eq 0 ];then
+ fatal "HA specific network \"$network\" already exists"
+ fi
+ done
+}
+
+
+get_test_case_array() {
+ test -d $BEHAVE_CASE_DIR || fatal "Cannot find '$BEHAVE_CASE_DIR'"
+ ls $BEHAVE_CASE_DIR|grep "\.feature"|grep -Ev "$BEHAVE_CASE_EXCLUDE"
+}
+
+
+echo_test_cases() {
+ case_arry=`get_test_case_array`
+ echo "Index|File Name|Description"
+ index=1
+ for f in ${case_arry[@]};do
+ desc=`awk -F: '/Feature/{print $2}' $BEHAVE_CASE_DIR/$f`
+ printf "%3s %-40s %-60s\n" $index $f "$desc"
+ index=$(($index+1))
+ done
+ printf "%3s %-40s %-60s\n" $index "regression test" "Original regression test"
+}
+
+
+usage_and_exit() {
+ prog=`basename $0`
+ cat <<END
+Usage: $prog [OPTIONS]|[TESTCASE INDEX]
+$prog is a tool for developers to set up a cluster in containers and run the functional tests.
+The container image comes with the cluster stack preinstalled, including pacemaker, corosync, crmsh and many other packages.
+Users can change the code under crmsh.git, including the test cases. This tool picks up the code changes and runs "make install" in all running containers.
+
+OPTIONS:
+ -h, --help Show this help message and exit
+ -l List existing functional test cases and exit
+  -n NUM		Only set up a cluster with NUM nodes (containers)
+  -x			Don't configure corosync on the containers (use with -n)
+  -d			Clean up the cluster containers
+  -u			Create normal users and an Azure-like ssh environment
+  -q			Create a qnetd node (use with -n and -x)
+
+EXAMPLES:
+To launch 2 nodes with a running cluster using a very basic corosync.conf
+# crmsh.git/test/run-functional-tests -n 2
+
+To launch 2 nodes without the cluster stack running, to play with "crm cluster init/join"
+# crmsh.git/test/run-functional-tests -n 2 -x
+
+To launch 2 nodes without the cluster stack running, plus a qnetd node (named 'qnetd-node')
+# crmsh.git/test/run-functional-tests -n 2 -x -q
+
+To list the existing test cases. Users can add their own test cases.
+# crmsh.git/test/run-functional-tests -l
+
+To run one or more functional test cases
+# crmsh.git/test/run-functional-tests 1
+# crmsh.git/test/run-functional-tests 1 2 3
+
+To clean up all containers generated by this tool
+# crmsh.git/test/run-functional-tests -d
+END
+ exit 1
+}
+
+
+docker_exec() {
+ name=$1
+ cmd=$2
+ docker exec -t $name /bin/sh -c "$cmd"
+}
+
+set_sshd_config_like_in_azure() {
+ node_name=$1
+ docker_exec $node_name "echo \"$SSHD_CONFIG_AZURE\" > /etc/ssh/sshd_config"
+ docker_exec $node_name "systemctl restart sshd.service"
+}
+
+create_custom_user() {
+ user_name=$1
+ user_id=$2
+ docker_exec $node_name "useradd -m -s /bin/bash ${user_name} 2>/dev/null"
+ docker_exec $node_name "chmod u+w /etc/sudoers"
+ docker_exec $node_name "echo \"${user_name} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers"
+ docker_exec $node_name "chmod u-w /etc/sudoers"
+ docker_exec $node_name "echo 'export PATH=\$PATH:/usr/sbin/' >> ~${user_name}/.bashrc"
+ docker_exec $node_name "echo -e \"linux\\nlinux\" | passwd ${user_name} 2>/dev/null"
+ docker_exec $node_name "cp -r /root/.ssh ~${user_name}/ && chown ${user_name}:haclient -R ~${user_name}/.ssh"
+ info "Create user '$user_name' on $node_name"
+}
+
+create_alice_bob_carol() {
+	# The custom users alice, bob and carol are as important as root.
+	# Eventually they should be baked into the docker image;
+	# for now, create them here.
+ create_custom_user "alice" "1000"
+ create_custom_user "bob" "1001"
+ create_custom_user "carol" "1002"
+}
+
+deploy_ha_node() {
+ node_name=$1
+ docker_options="-d --name $node_name -h $node_name --privileged --shm-size 1g"
+ make_cmd="cd $PROJECT_INSIDE;./autogen.sh && ./configure --prefix /usr && make install && make install-crmconfDATA prefix= && cp /usr/bin/crm /usr/sbin"
+
+ info "Deploying \"$node_name\"..."
+ docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network connect $network $node_name
+ done
+
+ if [ "$node_name" != "qnetd-node" ];then
+ rm_qnetd_cmd="rpm -q corosync-qnetd && rpm -e corosync-qnetd"
+ docker_exec $node_name "$rm_qnetd_cmd" &> /dev/null
+ fi
+ docker_exec $node_name "rm -rf /run/nologin"
+ docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"
+
+ if [ "$node_name" != "qnetd-node" ];then
+ docker cp $PROJECT_PATH $node_name:/opt/crmsh
+ info "Building crmsh on \"$node_name\"..."
+ docker_exec $node_name "$make_cmd" 1> /dev/null || \
+ fatal "Building failed on $node_name!"
+ docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
+ docker_exec $node_name "chmod g+w -R /var/log/crmsh"
+ create_alice_bob_carol
+ if [ "$NORMAL_USER_FLAG" -eq 1 ];then
+ set_sshd_config_like_in_azure $node_name
+ fi
+ else
+ docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
+ docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
+ docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
+ info "Create user 'alice' on $node_name"
+ [ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
+ fi
+}
+
+
+create_node() {
+ info "Loading docker image $DOCKER_IMAGE..."
+ docker pull $DOCKER_IMAGE &> /dev/null
+
+ for index in ${!HA_NETWORK_ARRAY[@]};do
+ network=${HA_NETWORK_ARRAY[$index]}
+ info "Create ha specific docker network \"$network\"..."
+ docker network create --ipv6 --subnet ${HA_NETWORK_V6_ARRAY[$index]} $network &> /dev/null
+ done
+
+ for node in $*;do
+ deploy_ha_node $node &
+ done
+ wait
+}
+
+
+config_cluster() {
+ node_num=$#
+ insert_str=""
+ container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)
+
+ for i in $(seq $node_num -1 1);do
+ ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
+ insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
+ done
+ corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
+ if [ $node_num -eq 2 ];then
+ corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
+ fi
+
+ info "Copy corosync.conf to $*"
+ for node in $*;do
+ if [ $node == $1 ];then
+ docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
+ docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
+ else
+ while :
+ do
+ docker_exec $1 "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
+ sleep 1
+ done
+ docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
+ fi
+ done
+}
+
+
+start_cluster() {
+ for node in $*;do
+ docker_exec $node "crm cluster enable && crm cluster start" 1> /dev/null
+ if [ "$?" -eq 0 ];then
+ info "Cluster service started on \"$node\""
+ else
+ fatal "Failed to start cluster service on \"$node\""
+ fi
+ done
+}
+
+
+container_already_exists() {
+ docker ps -a|grep -q "$1"
+ if [ "$?" -eq 0 ];then
+ fatal "Container \"$1\" already running"
+ fi
+}
+
+
+setup_cluster() {
+ hanodes_arry=()
+ is_number $1
+ if [ "$?" -eq 0 ];then
+ for i in $(seq 1 $1);do
+ hanodes_arry+=("hanode$i")
+ done
+ else
+ hanodes_arry=($*)
+ fi
+
+ if [ $WITH_QNETD_NODE -eq 1 ];then
+ create_node ${hanodes_arry[@]} "qnetd-node"
+ else
+ create_node ${hanodes_arry[@]}
+ fi
+
+ [ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
+ config_cluster ${hanodes_arry[@]}
+ start_cluster ${hanodes_arry[@]}
+ docker_exec "hanode1" "crm configure property stonith-enabled=false"
+}
+
+
+cleanup_container() {
+ node=$1
+ info "Cleanup container \"$node\"..."
+ docker container stop $node &> /dev/null
+ docker container rm $node &> /dev/null
+}
+
+
+cleanup_cluster() {
+ exist_network_array=()
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network ls|grep -q $network && exist_network_array+=($network)
+ done
+ if [ ${#exist_network_array[@]} -eq 0 ];then
+ info "Already cleaned up"
+ return 0
+ fi
+
+ container_array=(`docker network inspect $exist_network_array -f '{{range .Containers}}{{printf "%s " .Name}}{{end}}'`)
+ for node in ${container_array[@]};do
+ cleanup_container $node &
+ done
+ wait
+
+ for network in ${exist_network_array[@]};do
+ info "Cleanup ha specific docker network \"$network\"..."
+ docker network rm $network &> /dev/null
+ done
+}
+
+
+adjust_test_case() {
+ node_name=$1
+ replace_arry=(`docker_exec $node_name "grep -o -E '@(hanode[0-9]+|qnetd-node)\.ip[6]?\.(default|[0-9])' $2|sort -u|dos2unix"`)
+ for item in ${replace_arry[@]};do
+ item_str=${item##@}
+ node=`echo $item_str|cut -d "." -f 1`
+ ip_version=`echo $item_str|cut -d "." -f 2|tr -d "\r"`
+ ip_search_str="IPAddress"
+ if [ "$ip_version" == "ip6" ];then
+ ip_search_str="GlobalIPv6Address"
+ fi
+ index=`echo $item_str|cut -d "." -f 3|tr -d "\r"`
+ if [ "$index" == "default" ];then
+ ip=`docker container inspect $node -f "{{range .NetworkSettings.Networks}}{{printf \"%s \" .$ip_search_str}}{{end}}"|awk '{print $1}'|tr -d "\r"`
+ else
+ ip=`docker container inspect $node -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[$index]}.$ip_search_str}}"|tr -d "\r"`
+ fi
+ item=`echo $item|tr -d "\r"`
+ docker_exec $node_name "sed -i s/$item/$ip/g $2"
+ done
+
+ vip_replace_array=(`docker_exec $node_name "grep -o -E '@vip\.[0-9]' $2|sort -u|dos2unix"`)
+ for item in ${vip_replace_array[@]};do
+ index=`echo $item|cut -d "." -f 2|tr -d "\r"`
+ suffix=$((123+index))
+ ip=`docker container inspect $node_name -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[0]}.IPAddress}}"|tr -d "\r"`
+ vip=`echo $ip|sed "s/\.[0-9][0-9]*$/\.$suffix/g"|tr -d "\r"`
+ item=`echo $item|tr -d "\r"`
+ docker_exec $node_name "sed -i s/$item/$vip/g $2"
+ done
+}
+
+
+run_origin_regression_test() {
+ CONFIG_COROSYNC_FLAG=0
+ setup_cluster "hanode1"
+ docker_exec "hanode1" "sh /usr/share/crmsh/tests/regression.sh"
+ return $?
+}
+
+
+prepare_coverage_env() {
+ for node in $*; do
+ docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
+ done
+}
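+
+# After the sed above, /usr/sbin/crm begins roughly like this, so every
+# crm invocation records coverage data:
+#   import coverage
+#   import atexit
+#   cov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")
+#   atexit.register(lambda:(cov.stop(),cov.save()))
+#   cov.start()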
+
+
+fetch_coverage_report() {
+ local unique_id=$(dd if=/dev/urandom count=1 bs=6 | base64 | tr '+/' '-_')
+ for node in $*; do
+ docker_exec "$node" "coverage combine; coverage xml -o /opt/coverage.xml"
+ # see https://github.com/codecov/codecov-cli/blob/master/codecov_cli/services/upload/coverage_file_finder.py
+ docker cp "$node":/opt/coverage.xml coverage."$unique_id"."$node".xml
+ done
+}
+
+
+WITH_QNETD_NODE=0
+NORMAL_USER_FLAG=0
+CONFIG_COROSYNC_FLAG=1
+SETUP_N_NODES_CLUSTER=0
+options=$(getopt -l "help" -o "hldxuqn:" -- "$@")
+eval set -- "$options"
+while true;do
+case $1 in
+-h|--help) usage_and_exit;;
+-l)
+ echo_test_cases
+ exit 0
+ ;;
+-d)
+ check_docker_env cleanup
+ cleanup_cluster
+ exit $?
+ ;;
+-x)
+ CONFIG_COROSYNC_FLAG=0
+ shift
+ ;;
+-u)
+ NORMAL_USER_FLAG=1
+ shift
+ ;;
+-q)
+ WITH_QNETD_NODE=1
+ shift
+ ;;
+-n)
+ check_docker_env
+ shift
+	is_number $1 || fatal "-n option needs a number larger than 0"
+ SETUP_N_NODES_CLUSTER=$1
+ shift
+ ;;
+--)
+ shift
+ break
+ ;;
+esac
+done
+
+if [ $SETUP_N_NODES_CLUSTER -ge 1 ];then
+ setup_cluster $SETUP_N_NODES_CLUSTER
+ exit $?
+fi
+
+if [ "$#" -eq 0 ];then
+ usage_and_exit
+fi
+
+# used by github action
+if [ "$1" == "_get_index_of" ];then
+ shift
+ pattern=""
+ for item in $*;do
+ pattern+="${item}|"
+ done
+ echo_test_cases|grep -E "(${pattern%%|})(\.feature|\s+Original regression test)"|awk '{print $1}'|tr -s '\n' ' '
+ exit 0
+fi
+
+for case_num in $*;do
+ echo_test_cases|grep -E "\s+$case_num\s" &> /dev/null
+ if [ "$?" -ne 0 ];then
+ error "\"$case_num\" is an invalid index"
+ echo_test_cases
+ exit 1
+ fi
+done
+
+for case_num in $*;do
+ if [ "$case_num" -ne $1 ];then
+ check_docker_env cleanup
+ cleanup_cluster
+ echo
+ fi
+ check_docker_env
+ test_case_array=(`get_test_case_array`)
+ if [ $case_num -gt ${#test_case_array[*]} ];then
+ run_origin_regression_test || exit 1
+ continue
+ fi
+ case_file=$BEHAVE_CASE_DIR/${test_case_array[$((case_num-1))]}
+ case_file_in_container="$PROJECT_INSIDE/test/features/`basename $case_file`"
+ node_arry=(`awk -F: '/Need nodes/{print $2}' $case_file`)
+ CONFIG_COROSYNC_FLAG=0
+ setup_cluster ${node_arry[@]}
+ adjust_test_case ${node_arry[0]} $case_file_in_container
+ echo
+ prepare_coverage_env "${node_arry[@]}"
+ if [ "$NORMAL_USER_FLAG" -eq 0 ];then
+ info "Running \"$case_file_in_container\" under 'root'..."
+ docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
+ else
+ info "Running \"$case_file_in_container\" under normal user 'alice'..."
+ docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
+ fi
+ fetch_coverage_report "${node_arry[@]}"
+ echo
+done
diff --git a/test/testcases/acl b/test/testcases/acl
new file mode 100644
index 0000000..ebc9531
--- /dev/null
+++ b/test/testcases/acl
@@ -0,0 +1,60 @@
+show ACL
+node node1
+property enable-acl=true
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d0 ocf:pacemaker:Dummy
+primitive d1 ocf:pacemaker:Dummy
+role basic-read \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+role basic-read-basic \
+ read cib
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny tag:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+acl_target cyrus cyrus-role
+_test
+verify
+.
diff --git a/test/testcases/acl.excl b/test/testcases/acl.excl
new file mode 100644
index 0000000..31d13f7
--- /dev/null
+++ b/test/testcases/acl.excl
@@ -0,0 +1 @@
+INFO: 5: already using schema pacemaker-1.2
diff --git a/test/testcases/acl.exp b/test/testcases/acl.exp
new file mode 100644
index 0000000..f00405c
--- /dev/null
+++ b/test/testcases/acl.exp
@@ -0,0 +1,94 @@
+.TRY ACL
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: property enable-acl=true
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d0 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: role basic-read read status read type:node attribute:uname read type:node attribute:type read property
+.INP: role basic-read-basic read cib
+.INP: role d0-admin write meta:d0:target-role write meta:d0:is-managed read ref:d0
+.INP: role silly-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read utilization:d0 read property:stonith-enabled write property read node read node:node1 read nodeattr read nodeattr:a1 read nodeutil read nodeutil:node1 read status read cib
+.INP: role silly-role-two read xpath:"//nodes//attributes" deny tag:nvpair deny ref:d0
+.INP: acl_target alice basic-read-basic
+.INP: acl_target bob d0-admin basic-read-basic
+.INP: role cyrus-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read property
+.INP: acl_target cyrus cyrus-role
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1
+primitive d0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ enable-acl=true
+role basic-read \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role basic-read-basic \
+ read cib
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny type:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+acl_target cyrus \
+ cyrus-role
+.INP: commit
diff --git a/test/testcases/basicset b/test/testcases/basicset
new file mode 100644
index 0000000..4f023bf
--- /dev/null
+++ b/test/testcases/basicset
@@ -0,0 +1,18 @@
+confbasic
+bundle
+confbasic-xml
+edit
+rset
+rset-xml
+delete
+node
+resource
+file
+shadow
+ra
+acl
+history
+newfeatures
+commit
+bugs
+scripts
diff --git a/test/testcases/bugs b/test/testcases/bugs
new file mode 100644
index 0000000..28219ae
--- /dev/null
+++ b/test/testcases/bugs
@@ -0,0 +1,79 @@
+session Configuration bugs
+options
+sort_elements false
+up
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p4 Dummy
+primitive p3 Dummy
+primitive p2 Dummy
+primitive p1 Dummy
+colocation c1 inf: p1 p2
+filter "sed 's/p1 p2/& p3/'" c1
+show c1
+delete c1
+colocation c2 inf: [ p1 p2 ] p3 p4
+filter "sed 's/\\\[/\\\(/;s/\\\]/\\\)/'" c2
+show c2
+primitive p5 Dummy
+primitive p6 Dummy
+clone cl-p5 p5
+show
+commit
+_test
+verify
+show
+.
+session Unordered load file
+options
+sort_elements false
+up
+configure
+load update bugs-test.txt
+show
+commit
+_test
+verify
+.
+session Unknown properties
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+show
+commit
+_test
+verify
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+show
+commit
+_test
+verify
+.
+session template
+configure
+erase
+node node1
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+template
+new vip virtual-ip params ip=10.10.10.123
+load vip
+apply update
+up
+commit
+_test
+verify
+.
diff --git a/test/testcases/bugs.exp b/test/testcases/bugs.exp
new file mode 100644
index 0000000..af05e82
--- /dev/null
+++ b/test/testcases/bugs.exp
@@ -0,0 +1,215 @@
+.TRY Configuration bugs
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: erase
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p4 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p3 Dummy
+.INP: primitive p2 Dummy
+.INP: primitive p1 Dummy
+.INP: colocation c1 inf: p1 p2
+.INP: filter "sed 's/p1 p2/& p3/'" c1
+.INP: show c1
+colocation c1 inf: p1 p2 p3
+.INP: delete c1
+.INP: colocation c2 inf: [ p1 p2 ] p3 p4
+.INP: filter "sed 's/\[/\(/;s/\]/\)/'" c2
+.INP: show c2
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: primitive p5 Dummy
+.INP: primitive p6 Dummy
+.INP: clone cl-p5 p5
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.TRY Unordered load file
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: load update bugs-test.txt
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive gr1 Dummy
+primitive gr2 Dummy
+primitive gr3 Dummy
+primitive gr4 Dummy
+group g1 gr1 gr2
+group g2 gr3
+group g3 gr4
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+location loc1 g1 \
+ rule 200: #uname eq node1
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: _test
+.INP: verify
+.TRY Unknown properties
+.INP: configure
+.INP: erase
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: modified location:loc1 from g1 to gr2
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: property SAPHanaSR: hana_ha1_site_lss_WDF1=4
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: property SAPHanaSR_2: hana_ha1_site_iss_WDF1=cde hana_ha1_site_bss_WDF1=abc
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+.INP: commit
+.INP: _test
+.INP: verify
+.TRY template
+.INP: configure
+.INP: erase
+.INP: node node1
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: template
+.INP: new vip virtual-ip params ip=10.10.10.123
+INFO: 6: pulling in template virtual-ip
+.INP: load vip
+.INP: apply update
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr
+.EXT crm_resource --list-ocf-alternatives IPaddr
+.INP: up
+.INP: commit
+.INP: _test
+.INP: verify
diff --git a/test/testcases/bundle b/test/testcases/bundle
new file mode 100644
index 0000000..463687d
--- /dev/null
+++ b/test/testcases/bundle
@@ -0,0 +1,20 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+property stonith-enabled=true
+_test
+verify
+.
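The same definitions also work as one-shot commands against a live cluster, e.g. (assuming a reachable CIB):

    crm configure primitive id=dummy ocf:heartbeat:Dummy \
        op monitor interval=10 meta target-role=Stopped
    crm configure bundle id=bundle-test2 docker image=test \
        network ip-range-start=10.10.10.123 primitive dummy \
        meta target-role=Stopped priority=1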
diff --git a/test/testcases/bundle.exp b/test/testcases/bundle.exp
new file mode 100644
index 0000000..f6284ce
--- /dev/null
+++ b/test/testcases/bundle.exp
@@ -0,0 +1,57 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+.INP: primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+.INP: property stonith-enabled=true
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive dummy Dummy \
+ meta target-role=Stopped \
+ op monitor interval=10 timeout=20s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+bundle bundle-test1 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 \
+ storage storage-mapping id=storage1 target-dir=test source-dir=test \
+ meta target-role=Stopped
+bundle bundle-test2 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 \
+ primitive dummy \
+ meta target-role=Stopped priority=1
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: commit
diff --git a/test/testcases/commit b/test/testcases/commit
new file mode 100644
index 0000000..67b27c3
--- /dev/null
+++ b/test/testcases/commit
@@ -0,0 +1,39 @@
+show Commits of all kinds
+op_defaults timeout=2m
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m
+commit
+node node1 \
+ attributes mem=16G
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+primitive p2 ocf:heartbeat:Dummy
+primitive p3 ocf:heartbeat:Dummy
+group g1 p1 p2
+clone c1 g1
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+commit
+rename p3 pp3
+commit
+rename pp3 p3
+delete c1
+commit
+group g2 d1 d2
+commit
+delete g2
+commit
+filter "sed '/g1/s/p1/d1/'"
+group g2 d3 d2
+delete d2
+commit
+_test
+verify
+.
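Each commit pushes only the changes accumulated since the previous one, which is what lets this case batch renames, deletions and filter edits; commit.exp records which constraint references get rewritten at every step. The same batching works outside the harness, assuming crm's -f option accepts '-' for stdin:

    # sketch: batch several edits and push them in a single commit
    crm -f - <<'EOF'
    configure
    rename p3 pp3
    delete c1
    commit
    EOF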
diff --git a/test/testcases/commit.exp b/test/testcases/commit.exp
new file mode 100644
index 0000000..59d291c
--- /dev/null
+++ b/test/testcases/commit.exp
@@ -0,0 +1,90 @@
+.TRY Commits of all kinds
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: primitive st stonith:null params hostlist='node1' meta yoyo-meta="yoyo 2" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: commit
+WARNING: 7: st: unknown attribute 'yoyo-meta'
+.INP: node node1 attributes mem=16G
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p2 ocf:heartbeat:Dummy
+.INP: primitive p3 ocf:heartbeat:Dummy
+.INP: group g1 p1 p2
+.INP: clone c1 g1
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: commit
+.INP: rename p3 pp3
+INFO: 21: modified location:l1 from p3 to pp3
+INFO: 21: modified order:o1 from p3 to pp3
+INFO: 21: modified colocation:cl1 from p3 to pp3
+.INP: commit
+.INP: rename pp3 p3
+INFO: 23: modified location:l1 from pp3 to p3
+INFO: 23: modified order:o1 from pp3 to p3
+INFO: 23: modified colocation:cl1 from pp3 to p3
+.INP: delete c1
+INFO: 24: modified order:o1 from c1 to g1
+INFO: 24: modified colocation:cl1 from c1 to g1
+.INP: commit
+.INP: group g2 d1 d2
+.INP: commit
+.INP: delete g2
+.INP: commit
+.INP: filter "sed '/g1/s/p1/d1/'"
+.INP: group g2 d3 d2
+.INP: delete d2
+.INP: commit
+.INP: _test
+.INP: verify
+WARNING: 35: st: unknown attribute 'yoyo-meta'
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 p2
+group g2 d3
+colocation cl1 inf: g1 p3
+location l1 p3 100: node1
+order o1 Mandatory: p3 g1
+op_defaults op-options: \
+ timeout=2m
+.INP: commit
+INFO: 37: apparently there is nothing to commit
+INFO: 37: try changing something first
diff --git a/test/testcases/common.excl b/test/testcases/common.excl
new file mode 100644
index 0000000..4902553
--- /dev/null
+++ b/test/testcases/common.excl
@@ -0,0 +1,26 @@
+Could not send fail-count-p0=\(null\) update via attrd: connection failed
+Could not send fail-count-p0=<none> update via attrd: connection failed
+Could not send s1=\(null\) update via attrd: connection failed
+Could not send s1=<none> update via attrd: connection failed
+Error performing operation: The object/attribute does not exist
+Error setting fail-count-p0=5 \(section=status, set=status-node1\): The object/attribute does not exist
+Error setting s1=1 2 3 \(section=status, set=status-node1\): The object/attribute does not exist
+Error signing on to the CRMd service
+Error connecting to the controller
+Error performing operation: Transport endpoint is not connected
+Error performing operation: Not connected
+.EXT crm_resource --list-ocf-providers
+.EXT crm_resource --list-ocf-alternatives Delay
+.EXT crm_resource --list-ocf-alternatives Dummy
+^\.EXT crmd version
+^\.EXT cibadmin \-Ql
+^\.EXT crm_verify \-VV \-p
+^\.EXT cibadmin \-p \-P
+^\.EXT crm_diff \-\-help
+^\.EXT crm_diff \-o [^ ]+ \-n \-
+^\.EXT crm_diff \-\-no\-version \-o [^ ]+ \-n \-
+^\.EXT sed ['][^']+
+^\.EXT sed ["][^"]+
+^\.EXT [a-zA-Z]+ validate-all
+^[ ]+File ["][^"]+
+^.*\: ([0-9]+\: )?\(cluster\_status\) warning\: Fencing and resource management disabled due to lack of quorum
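Every line of common.excl is a regular expression; observed output lines matching any of them are dropped before comparison against the .exp file, so connection errors and version-dependent chatter do not fail the tests. A standalone sketch of that step, assuming one pattern per line:

    # hypothetical: strip excluded lines from a captured run
    grep -v -E -f test/testcases/common.excl observed.out > filtered.out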
diff --git a/test/testcases/common.filter b/test/testcases/common.filter
new file mode 100755
index 0000000..03846c2
--- /dev/null
+++ b/test/testcases/common.filter
@@ -0,0 +1,10 @@
+#!/usr/bin/awk -f
+# 1. replace .EXT [path/]<cmd> <parameter> with .EXT <cmd> <parameter>
+/\.EXT \/(.+)/ { gsub(/\/.*\//, "", $2) }
+/\.EXT >\/dev\/null 2>&1 \/(.+)/ { gsub(/\/.*\//, "", $4) }
+# 2. map pacemaker 2.x daemon names back to the legacy names used in .exp files
+/\.EXT pacemaker-fenced/ { gsub(/pacemaker-fenced/,"stonithd") }
+/\.EXT pacemaker-controld/ { gsub(/pacemaker-controld/,"crmd") }
+/\.EXT pacemaker-schedulerd/ { gsub(/pacemaker-schedulerd/,"pengine") }
+/\.EXT pacemaker-based/ { gsub(/pacemaker-based/,"cib") }
+{ print }
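The filter keeps the .exp files stable across environments: it strips directories from echoed external commands and maps the pacemaker 2.x daemon names back to the legacy names the expected output uses. For example:

    echo ".EXT pacemaker-fenced metadata" | awk -f test/testcases/common.filter
    # prints: .EXT stonithd metadata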
diff --git a/test/testcases/confbasic b/test/testcases/confbasic
new file mode 100644
index 0000000..b06016b
--- /dev/null
+++ b/test/testcases/confbasic
@@ -0,0 +1,91 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+primitive d7 Dummy \
+ params rule inf: #uname eq node1 fake=1 \
+ params rule inf: #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m
+primitive d8 ocf:pacemaker:Dummy
+clone m7 d8 \
+ meta promotable=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date spec years="2009" hours="09-17"
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+tag t1: m5 m6
+set d2.mondelay 45
+_test
+verify
+.
+-F node maintenance node1
+-F resource maintenance g1 off
+-F resource maintenance d1
+-F configure property maintenance-mode=true
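The '-F ...' lines after the closing dot are run as separate one-shot invocations (they show up as .TRY lines in confbasic.exp); -F is the force flag, so the first of them corresponds to:

    crm -F node maintenance node1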
diff --git a/test/testcases/confbasic-xml b/test/testcases/confbasic-xml
new file mode 100644
index 0000000..58433f5
--- /dev/null
+++ b/test/testcases/confbasic-xml
@@ -0,0 +1,72 @@
+showxml Basic configure (xml dump)
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt 2009-05-26 and \
+ date in start=2009-05-26 end=2009-07-26 and \
+ date in start=2009-05-26 years=2009 and \
+ date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 record-pending=true
+_test
+verify
+.
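showxml dumps the resulting configuration as CIB XML rather than CLI syntax; the dump is passed through confbasic-xml.filter before being compared with confbasic-xml.exp. A comparable pipeline outside the harness, assuming a live configuration:

    crm configure show xml | ./test/testcases/confbasic-xml.filter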
diff --git a/test/testcases/confbasic-xml.exp b/test/testcases/confbasic-xml.exp
new file mode 100644
index 0000000..20892dc
--- /dev/null
+++ b/test/testcases/confbasic-xml.exp
@@ -0,0 +1,206 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="true" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ <cluster_property_set id="cpset2">
+ <nvpair name="maintenance-mode" value="true" id="cpset2-maintenance-mode"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="node1-instance_attributes">
+ <nvpair name="mem" value="16G" id="node1-instance_attributes-mem"/>
+ </instance_attributes>
+ </node>
+ <node uname="node2" id="node2">
+ <utilization id="node2-utilization">
+ <nvpair name="cpu" value="4" id="node2-utilization-cpu"/>
+ </utilization>
+ </node>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <meta_attributes id="st-meta_attributes">
+ <nvpair name="target-role" value="Started" id="st-meta_attributes-target-role"/>
+ <nvpair name="requires" value="nothing" id="st-meta_attributes-requires"/>
+ </meta_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" interval="60m" timeout="60s" id="st-monitor-60m"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="st2" class="stonith" type="ssh">
+ <instance_attributes id="st2-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st2-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="20" interval="3600" id="st2-monitor-3600"/>
+ <op name="start" timeout="20" interval="0s" id="st2-start-0s"/>
+ <op name="stop" timeout="15" interval="0s" id="st2-stop-0s"/>
+ </operations>
+ </primitive>
+ <group id="g1">
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations id="d1-ops">
+ <op name="monitor" interval="60m" timeout="20s" id="d1-ops-monitor-60m"/>
+ <op name="monitor" interval="120m" timeout="20s" id="d1-ops-monitor-120m">
+ <instance_attributes id="d1-ops-monitor-120m-instance_attributes">
+ <nvpair name="OCF_CHECK_LEVEL" value="10" id="d1-ops-monitor-120m-instance_attributes-OCF_CHECK_LEVEL"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="d1-ops-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-ops-stop-0s"/>
+ <op name="monitor" interval="60s" timeout="30s" id="d1-monitor-60s"/>
+ </operations>
+ </primitive>
+ <primitive id="d2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="d2-instance_attributes">
+ <nvpair name="mondelay" value="60" id="d2-instance_attributes-mondelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="d2-start-0s"/>
+ <op name="stop" timeout="60s" interval="0s" id="d2-stop-0s"/>
+ <op name="monitor" timeout="30s" interval="10s" id="d2-monitor-10s"/>
+ <op name="monitor" role="Started" interval="60s" timeout="30s" id="d2-monitor-60s"/>
+ </operations>
+ </primitive>
+ </group>
+ <clone id="c">
+ <meta_attributes id="c-meta_attributes">
+ <nvpair name="clone-max" value="1" id="c-meta_attributes-clone-max"/>
+ <nvpair name="interleave" value="true" id="c-meta_attributes-interleave"/>
+ </meta_attributes>
+ <primitive id="d3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ <master id="m">
+ <primitive id="d4" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d4-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d4-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d4-stop-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m5">
+ <primitive id="s5" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s5-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s5-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s5-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s5-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s5-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m6">
+ <primitive id="s6" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s6-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s6-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s6-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s6-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s6-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s6-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+ <constraints>
+ <rsc_location id="l1" rsc="g1" score="100" node="node1"/>
+ <rsc_location id="l2" rsc="c">
+ <rule id="l2-rule1" score="100">
+ <expression operation="eq" attribute="#uname" value="node1" id="l2-rule1-expression"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l3" rsc="m5">
+ <rule score="INFINITY" id="l3-rule">
+ <expression operation="eq" attribute="#uname" value="node1" id="l3-rule-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l3-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l4" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l4-rule">
+ <expression operation="not_defined" attribute="pingd" id="l4-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l4-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l5" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l5-rule">
+ <expression operation="not_defined" attribute="pingd" id="l5-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l5-rule-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-0">
+ <expression operation="eq" attribute="#uname" value="node1" id="l5-rule-0-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l5-rule-0-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-1">
+ <date_expression operation="lt" end="2009-05-26" id="l5-rule-1-expression"/>
+ <date_expression operation="in_range" start="2009-05-26" end="2009-07-26" id="l5-rule-1-expression-0"/>
+ <date_expression operation="in_range" start="2009-05-26" id="l5-rule-1-expression-1">
+ <duration years="2009" id="l5-rule-1-expression-1-duration"/>
+ </date_expression>
+ <date_expression operation="date_spec" id="l5-rule-1-expression-2">
+ <date_spec years="2009" hours="09-17" id="l5-rule-1-expression-2-date_spec"/>
+ </date_expression>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l6" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_location id="l7" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>
+ <rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>
+ <rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>
+ <rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>
+ <rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>
+ <rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence">
+ <resource_set id="ticket-B_m6_m5-0">
+ <resource_ref id="m6"/>
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ <rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence">
+ <resource_set id="ticket-C_master-0">
+ <resource_ref id="m6"/>
+ </resource_set>
+ <resource_set role="Master" id="ticket-C_master-1">
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ </constraints>
+ <fencing-topology>
+ <fencing-level target="node1" index="1" devices="st" id="fencing"/>
+ <fencing-level target="node1" index="2" devices="st2" id="fencing-0"/>
+ <fencing-level target="node2" index="1" devices="st" id="fencing-1"/>
+ <fencing-level target="node2" index="2" devices="st2" id="fencing-2"/>
+ </fencing-topology>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="failure-timeout" value="10m" id="rsc-options-failure-timeout"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="opsdef2">
+ <nvpair name="record-pending" value="true" id="opsdef2-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+</cib>
diff --git a/test/testcases/confbasic-xml.filter b/test/testcases/confbasic-xml.filter
new file mode 100755
index 0000000..7b677da
--- /dev/null
+++ b/test/testcases/confbasic-xml.filter
@@ -0,0 +1,3 @@
+#!/bin/bash
+# drop WARNING lines from the XML dump before comparing with the .exp file
+grep -v "WARNING"
diff --git a/test/testcases/confbasic.exp b/test/testcases/confbasic.exp
new file mode 100644
index 0000000..5fc2dff
--- /dev/null
+++ b/test/testcases/confbasic.exp
@@ -0,0 +1,199 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: primitive d1 ocf:pacemaker:Dummy operations $id=d1-ops op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: monitor d1 60s:30s
+.INP: primitive d2 ocf:heartbeat:Delay params mondelay=60 op start timeout=60s op stop timeout=60s
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: monitor d2:Started 60s:30s
+.INP: group g1 d1 d2
+.INP: primitive d3 ocf:pacemaker:Dummy
+.INP: clone c d3 meta clone-max=1
+.INP: primitive d4 ocf:pacemaker:Dummy
+.INP: ms m d4
+WARNING: 19: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: delete m
+.INP: master m d4
+WARNING: 21: This command 'master' is deprecated, please use 'ms'
+INFO: 21: "master" is accepted as "ms"
+WARNING: 21: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: primitive s5 ocf:pacemaker:Stateful operations $id-ref=d1-ops
+.EXT crm_resource --show-metadata ocf:pacemaker:Stateful
+.INP: primitive s6 ocf:pacemaker:Stateful operations $id-ref=d1
+.INP: ms m5 s5
+WARNING: 24: "ms" is deprecated. Please use "clone m5 s5 meta promotable=true"
+.INP: ms m6 s6
+WARNING: 25: "ms" is deprecated. Please use "clone m6 s6 meta promotable=true"
+.INP: primitive d7 Dummy params rule inf: #uname eq node1 fake=1 params rule inf: #uname eq node2 fake=2 op start interval=0 timeout=60s op_params 2: rule #uname eq node1 op_param=dummy op_params 1: op_param=smart op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m op_meta 1: start-delay=60m
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d8 ocf:pacemaker:Dummy
+.INP: clone m7 d8 meta promotable=true meta promoted-max=1 meta promoted-node-max=1
+.INP: location l1 g1 100: node1
+.INP: location l2 c rule $id=l2-rule1 100: #uname eq node1
+.INP: location l3 m5 rule inf: #uname eq node1 and pingd gt 0
+.INP: location l4 m5 rule -inf: not_defined pingd or pingd lte 0
+.INP: location l5 m5 rule -inf: not_defined pingd or pingd lte 0 rule inf: #uname eq node1 and pingd gt 0 rule inf: date lt "2009-05-26" and date in start="2009-05-26" end="2009-07-26" and date in start="2009-05-26" years="2009" and date spec years="2009" hours="09-17"
+.INP: location l6 m5 rule $id-ref=l2-rule1
+.INP: location l7 m5 rule $id-ref=l2
+.INP: collocation c1 inf: m6 m5
+WARNING: 36: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 36: "collocation" is accepted as "colocation"
+.INP: collocation c2 inf: m5:Master d1:Started
+WARNING: 37: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 37: "collocation" is accepted as "colocation"
+.INP: order o1 Mandatory: m5 m6
+.INP: order o2 Optional: d1:start m5:promote
+.INP: order o3 Serialize: m5 m6
+.INP: order o4 Mandatory: m5 m6
+.INP: rsc_ticket ticket-A_m6 ticket-A: m6
+.INP: rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+.INP: rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+.INP: fencing_topology st st2
+.INP: property stonith-enabled=true
+.INP: property $id=cpset2 maintenance-mode=true
+.INP: rsc_defaults failure-timeout=10m
+.INP: op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+.INP: tag t1: m5 m6
+.INP: set d2.mondelay 45
+.INP: _test
+.INP: verify
+WARNING: 53: c2: resource d1 is grouped, constraints should apply to the group
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op monitor interval=60s timeout=30s
+primitive d2 Delay \
+ params mondelay=45 \
+ op start timeout=60s interval=0s \
+ op stop timeout=60s interval=0s \
+ op monitor timeout=30s interval=10s \
+ op monitor role=Started interval=60s timeout=30s
+primitive d3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d7 Dummy \
+ params rule #uname eq node1 fake=1 \
+ params rule #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m \
+ op monitor timeout=20s interval=10s \
+ op stop timeout=20s interval=0s
+primitive d8 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 d2
+ms m d4
+ms m5 s5
+ms m6 s6
+clone c d3 \
+ meta clone-max=1 interleave=true
+clone m7 d8 \
+ meta promotable=true interleave=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+tag t1 m5 m6
+colocation c1 inf: m6 m5
+colocation c2 inf: m5:Master d1:Started
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule #uname eq node1 and pingd gt 0 \
+ rule date lt 2009-05-26 and date in start=2009-05-26 end=2009-07-26 and date in start=2009-05-26 years=2009 and date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2-rule1
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+fencing_topology st st2
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+property cib-bootstrap-options: \
+ stonith-enabled=true
+property cpset2: \
+ maintenance-mode=true
+rsc_defaults rsc-options: \
+ failure-timeout=10m
+op_defaults opsdef2: \
+ rule 100: #uname eq node1 \
+ record-pending=true
+.INP: commit
+WARNING: 55: c2: resource d1 is grouped, constraints should apply to the group
+.TRY -F node maintenance node1
+.TRY -F resource maintenance g1 off
+.TRY -F resource maintenance d1
+.TRY -F configure property maintenance-mode=true
+INFO: 'maintenance' attribute already exists in d1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in node1. Remove it? [YES]
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
diff --git a/test/testcases/delete b/test/testcases/delete
new file mode 100644
index 0000000..7d0dc57
--- /dev/null
+++ b/test/testcases/delete
@@ -0,0 +1,64 @@
+session Delete/Rename test
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+show
+_test
+rename d1 p1
+show
+# delete primitive
+delete d2
+_test
+show
+# delete primitive with constraint
+delete p1
+_test
+show
+primitive d1 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+_test
+# delete primitive belonging to a group
+primitive d2 ocf:pacemaker:Dummy
+_test
+group g1 d2 d1
+delete d2
+show
+_test
+delete g1
+show
+verify
+# delete a group which is in a clone
+primitive d2 ocf:pacemaker:Dummy
+group g1 d2 d1
+clone c1 g1
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+_test
+# delete group from a clone (again)
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+# delete primitive and its group and their clone
+delete d2 d1 c1 g1
+show
+_test
+# verify
+verify
+commit
+.
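As the INFO lines in delete.exp confirm, rename rewrites every constraint reference to the renamed object, and deleting a referenced object either retargets or drops the hanging constraint. The rename is also available as a one-shot, e.g.:

    crm configure rename d1 p1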
diff --git a/test/testcases/delete.exp b/test/testcases/delete.exp
new file mode 100644
index 0000000..87b1a7a
--- /dev/null
+++ b/test/testcases/delete.exp
@@ -0,0 +1,194 @@
+.TRY Delete/Rename test
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: _test
+.INP: rename d1 p1
+INFO: 13: modified location:d1-pref from d1 to p1
+.INP: show
+node node1
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive
+.INP: delete d2
+.INP: _test
+.INP: show
+node node1
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive with constraint
+.INP: delete p1
+INFO: 20: hanging location:d1-pref deleted
+.INP: _test
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: _test
+.INP: # delete primitive belonging to a group
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 29: modified location:d1-pref from d1 to g1
+.INP: delete d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+group g1 d1
+location d1-pref g1 100: node1
+.INP: _test
+.INP: delete g1
+INFO: 33: modified location:d1-pref from g1 to d1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: verify
+.INP: # delete a group which is in a clone
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: group g1 d2 d1
+INFO: 38: modified location:d1-pref from d1 to g1
+.INP: clone c1 g1
+INFO: 39: modified location:d1-pref from g1 to c1
+.INP: delete g1
+INFO: 40: modified location:d1-pref from c1 to g1
+INFO: 40: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 43: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 44: modified location:d1-pref from g1 to c1
+.INP: _test
+.INP: # delete group from a clone (again)
+.INP: delete g1
+INFO: 47: modified location:d1-pref from c1 to g1
+INFO: 47: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 50: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 51: modified location:d1-pref from g1 to c1
+.INP: # delete primitive and its group and their clone
+.INP: delete d2 d1 c1 g1
+INFO: 53: modified location:d1-pref from c1 to g1
+INFO: 53: modified location:d1-pref from g1 to d2
+INFO: 53: hanging location:d1-pref deleted
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: _test
+.INP: # verify
+.INP: verify
+.INP: commit
diff --git a/test/testcases/edit b/test/testcases/edit
new file mode 100644
index 0000000..7deb115
--- /dev/null
+++ b/test/testcases/edit
@@ -0,0 +1,95 @@
+show Configuration editing
+op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+filter "sed '$agroup g1 p1 p2'"
+show
+filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+show
+filter "sed '$aclone c1 p2'"
+filter "sed 's/p2/g1/'" c1
+filter "sed '/clone/s/g1/p2/'" c1 g1
+filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+filter "sed '1,$d'" c1 g1
+filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+filter "sed '/cl1/s/p3/p2/'"
+filter "sed '/cl1/d'"
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+group g2 d1 d2
+filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+filter "sed 's/ or mem number:lte 0//'" loc-d1
+filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+modgroup g1 add d3
+modgroup g1 remove p1
+modgroup g1 add p1 after p2
+modgroup g1 remove p1
+modgroup g1 add p1 before p2
+modgroup g1 add p1
+modgroup g1 remove st
+modgroup g1 remove c1
+modgroup g1 remove nosuch
+modgroup g1 add c1
+modgroup g1 add nosuch
+filter "sed 's/^/# this is a comment\\n/'" loc-d1
+rsc_defaults $id="rsc_options" failure-timeout=10m
+filter "sed 's/2m/60s/'" op-options
+show op-options
+property stonith-enabled=true
+show cib-bootstrap-options
+filter 'sed "s/stonith-enabled=true//"'
+show cib-bootstrap-options
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+primitive d6 ocf:heartbeat:Dummy
+order o-d456 d4 d5 d6
+tag t-d45: d4 d5
+show type:order
+show related:d4
+show
+commit
+_test
+verify
+primitive a0 ocf:heartbeat:Dummy
+primitive a1 ocf:heartbeat:Dummy
+primitive a2 ocf:heartbeat:Dummy
+primitive a3 ocf:heartbeat:Dummy
+primitive a4 ocf:heartbeat:Dummy
+primitive a5 ocf:heartbeat:Dummy
+primitive a6 ocf:heartbeat:Dummy
+primitive a7 ocf:heartbeat:Dummy
+primitive a8 ocf:heartbeat:Dummy
+primitive a9 ocf:heartbeat:Dummy
+primitive aErr ocf:heartbeat:Dummy
+group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+commit
+cd ..
+cd configure
+filter "sed '/as/s/a9//'"
+filter "sed '/as/s/a1/a1 a9/'"
+commit
+cd ..
+cd configure
+filter "sed '/abs/s/a9//'"
+filter "sed '/abs/s/a8/a8 a9/'"
+show
+commit
+_test
+verify
+.
diff --git a/test/testcases/edit.excl b/test/testcases/edit.excl
new file mode 100644
index 0000000..3589a25
--- /dev/null
+++ b/test/testcases/edit.excl
@@ -0,0 +1 @@
+^\.EXT sed \-[re] ['][^']
diff --git a/test/testcases/edit.exp b/test/testcases/edit.exp
new file mode 100644
index 0000000..3d3bc0b
--- /dev/null
+++ b/test/testcases/edit.exp
@@ -0,0 +1,437 @@
+.TRY Configuration editing
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: node node1 attributes mem=16G
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+.INP: filter "sed '$agroup g1 p1 p2'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p3
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed '$aclone c1 p2'"
+.INP: filter "sed 's/p2/g1/'" c1
+.INP: filter "sed '/clone/s/g1/p2/'" c1 g1
+.INP: filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+.INP: filter "sed '1,$d'" c1 g1
+.INP: filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: filter "sed '/cl1/s/p3/p2/'"
+.INP: filter "sed '/cl1/d'"
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: group g2 d1 d2
+.INP: filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+ERROR: 29: Cannot create group:g1: Child primitive:d1 already in group:g2
+.INP: filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+.INP: filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: filter "sed 's/ or mem number:lte 0//'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: modgroup g1 add d3
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 after p2
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 before p2
+.INP: modgroup g1 add p1
+ERROR: 1: syntax in group: child p1 listed more than once in group g1 parsing 'group g1 p1 p2 d3 p1'
+.INP: modgroup g1 remove st
+ERROR: 42: configure.modgroup: st is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: st is not member of g1
+.INP: modgroup g1 remove c1
+ERROR: 43: configure.modgroup: c1 is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: c1 is not member of g1
+.INP: modgroup g1 remove nosuch
+ERROR: 44: configure.modgroup: nosuch is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: nosuch is not member of g1
+.INP: modgroup g1 add c1
+ERROR: 45: a group may contain only primitives; c1 is clone
+.INP: modgroup g1 add nosuch
+ERROR: 46: g1 refers to missing object nosuch
+.INP: filter "sed 's/^/# this is a comment\n/'" loc-d1
+.INP: rsc_defaults $id="rsc_options" failure-timeout=10m
+.INP: filter "sed 's/2m/60s/'" op-options
+.INP: show op-options
+op_defaults op-options: \
+ timeout=60s
+.INP: property stonith-enabled=true
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: filter 'sed "s/stonith-enabled=true//"'
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options:
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: primitive d6 ocf:heartbeat:Dummy
+.INP: order o-d456 d4 d5 d6
+.INP: tag t-d45: d4 d5
+.INP: show type:order
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+.INP: show related:d4
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+tag t-d45 d4 d5
+order o-d456 d4 d5 d6
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: _test
+.INP: verify
+.INP: primitive a0 ocf:heartbeat:Dummy
+.INP: primitive a1 ocf:heartbeat:Dummy
+.INP: primitive a2 ocf:heartbeat:Dummy
+.INP: primitive a3 ocf:heartbeat:Dummy
+.INP: primitive a4 ocf:heartbeat:Dummy
+.INP: primitive a5 ocf:heartbeat:Dummy
+.INP: primitive a6 ocf:heartbeat:Dummy
+.INP: primitive a7 ocf:heartbeat:Dummy
+.INP: primitive a8 ocf:heartbeat:Dummy
+.INP: primitive a9 ocf:heartbeat:Dummy
+.INP: primitive aErr ocf:heartbeat:Dummy
+.INP: group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/as/s/a9//'"
+.INP: filter "sed '/as/s/a1/a1 a9/'"
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/abs/s/a9//'"
+.INP: filter "sed '/abs/s/a8/a8 a9/'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 89: apparently there is nothing to commit
+INFO: 89: try changing something first
+.INP: _test
+.INP: verify
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 93: apparently there is nothing to commit
+INFO: 93: try changing something first
diff --git a/test/testcases/file b/test/testcases/file
new file mode 100644
index 0000000..5f215b7
--- /dev/null
+++ b/test/testcases/file
@@ -0,0 +1,14 @@
+configure save sample.txt
+%ext cat sample.txt
+configure erase nodes
+configure load replace sample.txt
+%ext sed -i 's/60s/2m/' sample.txt
+%ext sed -i '8a # comment' sample.txt
+session Load update
+configure
+delete m1 p1
+property cluster-recheck-interval="10m"
+load update sample.txt
+.
+configure show
+%ext rm sample.txt
diff --git a/test/testcases/file.exp b/test/testcases/file.exp
new file mode 100644
index 0000000..dce48de
--- /dev/null
+++ b/test/testcases/file.exp
@@ -0,0 +1,77 @@
+.TRY configure save sample.txt
+.EXT cat sample.txt
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+clone c1 p1 \
+ meta interleave=true
+clone m1 p2 \
+ meta promotable=true interleave=true
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.TRY configure erase nodes
+.TRY configure load replace sample.txt
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT sed -i 's/60s/2m/' sample.txt
+.EXT sed -i '8a # comment' sample.txt
+.TRY Load update
+.INP: configure
+.INP: delete m1 p1
+.INP: property cluster-recheck-interval="10m"
+.INP: load update sample.txt
+ERROR: 4: syntax: Unknown command near <op> parsing 'op stop timeout=20s interval=0s'
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.TRY configure show
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ cluster-recheck-interval=10m
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.EXT rm sample.txt
diff --git a/test/testcases/history b/test/testcases/history
new file mode 100644
index 0000000..383fca8
--- /dev/null
+++ b/test/testcases/history
@@ -0,0 +1,42 @@
+session History
+history
+source history-test.tar.bz2
+info
+events
+node 15sp1-1
+node 15sp1-2
+node .*
+exclude pcmk_peer_update
+exclude
+node 15sp1-2
+exclude clear
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+exclude clear
+peinputs
+peinputs v
+transitions
+refresh
+resource d1
+# reduce report span
+timeframe "2019-03-22 15:07:37"
+peinputs
+resource d1
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+transition log
+transition nograph
+transition -1 nograph
+transition save 0 _crmsh_regtest
+transition log 49
+transition tags 49
+# reset timeframe
+timeframe
+session save _crmsh_regtest
+session load _crmsh_regtest
+session
+session pack
+.
+session History 2
+history
+session load _crmsh_regtest
+exclude
+.
diff --git a/test/testcases/history.excl b/test/testcases/history.excl
new file mode 100644
index 0000000..01f788c
--- /dev/null
+++ b/test/testcases/history.excl
@@ -0,0 +1,3 @@
+^ptest.*:
+^\.EXT tar -C ['][^']+['] -cj -f ['][^']+['] _crmsh_regtest
+^Report saved in ['][^']+
diff --git a/test/testcases/history.exp b/test/testcases/history.exp
new file mode 100644
index 0000000..55cb2c8
--- /dev/null
+++ b/test/testcases/history.exp
@@ -0,0 +1,600 @@
+.TRY History
+.INP: history
+.INP: source history-test.tar.bz2
+.INP: info
+.EXT tar -tj < history-test.tar.bz2 2> /dev/null | head -1
+.EXT tar -xj < history-test.tar.bz2
+Source: history-test.tar.bz2
+Created on: Fri Mar 22 15:08:40 CST 2019
+By: report
+Period: 2019-03-19 01:09:49 - 2019-03-22 23:08:36
+Nodes: 15sp1-1 15sp1-2
+Groups: g1
+Clones:
+Resources: stonith-sbd d1 d2
+Transitions: ... 37* 38* 39* 40* 41 42* 43 44* 45 46 0 48 49* 11 12 13* 15* 16 18 19*
+.INP: events
+2019-03-22T10:56:18.986113+08:00 15sp1-2 mysql(mysql)[2185]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:18.586826+08:00 15sp1-1 mysql(mysql)[4459]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.028197+08:00 15sp1-2 mysql(mysql)[2224]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.082101+08:00 15sp1-2 mysql(mysql)[2259]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.026652+08:00 15sp1-1 pacemaker-schedulerd[1739]: notice: * Recover mysql ( 15sp1-2 -> 15sp1-1 )
+2019-03-22T10:56:19.292370+08:00 15sp1-1 mysql(mysql)[4498]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.646138+08:00 15sp1-1 mysql(mysql)[4533]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T11:02:21.651185+08:00 15sp1-1 pacemakerd[1742]: warning: pacemaker-controld[1749] terminated with signal 9 (core=0)
+2019-03-22T11:45:15.291388+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T11:46:15.982330+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T14:46:29.149904+08:00 15sp1-1 sshd[11637]: error: PAM: Authentication failure for root from 10.67.19.6
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:35:10.376892+08:00 15sp1-2 pacemaker-controld[1750]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:39:50.964158+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:40:41.791107+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=53)
+2019-03-22T10:41:15.144867+08:00 15sp1-2 pacemaker-controld[2965]: notice: Updating quorum status to true (call=31)
+2019-03-22T10:42:43.668990+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:57:27.930481+08:00 15sp1-1 pacemaker-controld[1740]: notice: Peer 15sp1-2 was terminated (reboot) by 15sp1-1 on behalf of pacemaker-controld.1740: OK
+2019-03-22T10:57:52.410569+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=175)
+2019-03-22T11:00:43.930597+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=34)
+2019-03-22T11:01:29.688725+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=62)
+2019-03-22T11:02:23.786295+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=85)
+2019-03-22T10:39:55.137238+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:39:55.137767+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:39:57.604345+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:2467ms queue-time:0ms
+2019-03-22T10:41:13.905506+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:41:13.913809+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:41:13.913941+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:13.914056+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T10:41:13.914284+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T10:41:15.074728+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1170ms queue-time:0ms
+2019-03-22T10:41:16.497053+08:00 15sp1-2 pacemaker-controld[2965]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-2
+2019-03-22T10:41:16.497127+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:16.497217+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:2ms queue-time:0ms
+2019-03-22T10:42:44.878768+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:42:44.880933+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:42:46.405487+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1524ms queue-time:0ms
+2019-03-22T10:43:08.620641+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:43:08.620831+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:43:08.621463+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:1ms queue-time:0ms
+2019-03-22T10:54:17.948621+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:54:17.948709+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:42
+2019-03-22T10:54:19.157468+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:42 exit-code:0 exec-time:1209ms queue-time:0ms
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.496863+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:start call_id:39
+2019-03-22T10:54:48.510603+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:start call_id:39 pid:2145 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:48.474653+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d2_start_0 on 15sp1-2
+2019-03-22T10:54:58.218867+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:stop call_id:40
+2019-03-22T10:54:58.234531+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:stop call_id:40 pid:2150 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.196862+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d2_stop_0 on 15sp1-2
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:00:42.659431+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T11:00:42.660180+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:58
+2019-03-22T11:00:42.660574+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:58 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T11:00:42.661106+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T11:00:42.660196+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:14
+2019-03-22T11:00:43.862608+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:14 exit-code:0 exec-time:1202ms queue-time:0ms
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.233648+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d2_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+2019-03-22T11:03:05.232910+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d2 action:start call_id:22
+2019-03-22T11:03:05.246921+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d2 action:start call_id:22 pid:1852 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T11:45:14.806899+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-1
+2019-03-22T11:45:14.805511+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:stonith-sbd action:start call_id:34
+2019-03-22T11:45:16.071026+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:stonith-sbd action:start call_id:34 exit-code:0 exec-time:1266ms queue-time:0ms
+2019-03-22T11:46:15.742947+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-2
+2019-03-22T11:46:15.743031+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:45
+2019-03-22T11:46:16.907002+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:45 exit-code:0 exec-time:1165ms queue-time:0ms
+.INP: node 15sp1-1
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: node .*
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude pcmk_peer_update
+.INP: exclude
+pcmk_peer_update
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude clear
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: exclude clear
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: peinputs v
+Date Start End Filename Client User Origin Tags
+==== ===== === ======== ====== ==== ====== ====
+2019-03-22 18:35:11 18:35:11 pe-input-3 crmd hacluster 15sp1-1
+2019-03-22 18:36:10 18:36:10 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:37:14 18:37:14 pe-input-5 crmd hacluster 15sp1-1
+2019-03-22 18:39:51 18:39:51 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:39:55 18:39:57 pe-input-5 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 18:39:57 pe-input-6 cibadmin root 15sp1-1
+2019-03-22 18:40:41 18:40:41 pe-input-7 cibadmin root 15sp1-1
+2019-03-22 18:41:13 18:41:15 pe-input-8 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 --:--:-- pe-input-7 crmd hacluster 15sp1-1
+2019-03-22 18:41:16 18:41:16 pe-input-8 crmd hacluster 15sp1-1 stonith-sbd
+2019-03-22 18:42:44 18:42:46 pe-input-9 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 18:43:08 pe-input-10 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 18:43:23 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 18:43:44 18:43:45 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 18:44:29 18:44:29 pe-input-13 cibadmin root 15sp1-1
+2019-03-22 18:44:36 18:44:36 pe-input-14 cibadmin root 15sp1-1
+2019-03-22 18:44:38 18:44:38 pe-input-15 cibadmin root 15sp1-1
+2019-03-22 18:44:59 18:45:00 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 18:45:14 18:45:14 pe-input-17 cibadmin root 15sp1-1
+2019-03-22 18:45:32 18:45:32 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 18:45:37 18:45:37 pe-input-19 cibadmin root 15sp1-1
+2019-03-22 18:48:50 18:48:50 pe-input-20 cibadmin root 15sp1-1
+2019-03-22 18:48:51 --:--:-- pe-input-21 cibadmin root 15sp1-1
+2019-03-22 18:49:48 18:49:48 pe-input-23 cibadmin root 15sp1-1
+2019-03-22 18:49:53 18:49:53 pe-input-24 cibadmin root 15sp1-1
+2019-03-22 18:51:19 18:51:19 pe-input-25 cibadmin root 15sp1-1
+2019-03-22 18:51:39 18:51:39 pe-input-26 cibadmin root 15sp1-1
+2019-03-22 18:51:53 18:51:53 pe-input-27 cibadmin root 15sp1-1
+2019-03-22 18:51:54 --:--:-- pe-input-28 cibadmin root 15sp1-1
+2019-03-22 18:52:06 18:52:06 pe-input-30 cibadmin root 15sp1-1
+2019-03-22 18:52:25 18:52:25 pe-input-31 cibadmin root 15sp1-1
+2019-03-22 18:53:09 18:53:09 pe-input-32 cibadmin root 15sp1-1
+2019-03-22 18:53:15 18:53:15 pe-input-33 cibadmin root 15sp1-1
+2019-03-22 18:53:15 --:--:-- pe-input-34 cibadmin root 15sp1-1
+2019-03-22 18:54:08 18:54:08 pe-input-36 cibadmin root 15sp1-1
+2019-03-22 18:54:17 18:54:19 pe-input-37 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 18:54:48 pe-input-38 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:54:58 18:54:58 pe-input-39 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:56:18 18:56:19 pe-input-40 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 18:56:19 pe-input-41 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-42 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 --:--:-- pe-input-43 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-44 cibadmin root 15sp1-1 error
+2019-03-22 18:56:42 18:56:42 pe-input-45 cibadmin root 15sp1-1
+2019-03-22 18:56:43 --:--:-- pe-input-46 cibadmin root 15sp1-1
+2019-03-22 18:56:55 18:57:27 pe-warn-0 cibadmin root 15sp1-1
+2019-03-22 18:57:52 18:57:52 pe-input-48 cibadmin root 15sp1-1
+2019-03-22 19:00:42 19:00:43 pe-input-49 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 19:01:31 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 19:02:24 19:02:24 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 19:03:05 19:03:05 pe-input-13 cibadmin root 15sp1-1 d1 d2
+2019-03-22 19:45:14 19:45:16 pe-input-15 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:45:16 19:45:16 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:15 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:16 pe-input-19 cibadmin root 15sp1-1 stonith-sbd
+.INP: transitions
+Time Name Node Tags
+2019-03-22 18:35:11 - 18:35:11: pe-input-3 15sp1-2
+2019-03-22 18:36:10 - 18:36:10: pe-input-4 15sp1-2
+2019-03-22 18:37:14 - 18:37:14: pe-input-5 15sp1-2
+2019-03-22 18:39:51 - 18:39:51: pe-input-4 15sp1-1
+2019-03-22 18:39:55 - 18:39:57: pe-input-5 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 - 18:39:57: pe-input-6 15sp1-1
+2019-03-22 18:40:41 - 18:40:41: pe-input-7 15sp1-1
+2019-03-22 18:41:13 - 18:41:15: pe-input-8 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 - --:--:--: pe-input-7 15sp1-2
+2019-03-22 18:41:16 - 18:41:16: pe-input-8 15sp1-2 stonith-sbd
+2019-03-22 18:42:44 - 18:42:46: pe-input-9 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 - 18:43:08: pe-input-10 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 - 18:43:23: pe-input-11 15sp1-1
+2019-03-22 18:43:44 - 18:43:45: pe-input-12 15sp1-1
+2019-03-22 18:44:29 - 18:44:29: pe-input-13 15sp1-1
+2019-03-22 18:44:36 - 18:44:36: pe-input-14 15sp1-1
+2019-03-22 18:44:38 - 18:44:38: pe-input-15 15sp1-1
+2019-03-22 18:44:59 - 18:45:00: pe-input-16 15sp1-1
+2019-03-22 18:45:14 - 18:45:14: pe-input-17 15sp1-1
+2019-03-22 18:45:32 - 18:45:32: pe-input-18 15sp1-1
+2019-03-22 18:45:37 - 18:45:37: pe-input-19 15sp1-1
+2019-03-22 18:48:50 - 18:48:50: pe-input-20 15sp1-1
+2019-03-22 18:48:51 - --:--:--: pe-input-21 15sp1-1
+2019-03-22 18:49:48 - 18:49:48: pe-input-23 15sp1-1
+2019-03-22 18:49:53 - 18:49:53: pe-input-24 15sp1-1
+2019-03-22 18:51:19 - 18:51:19: pe-input-25 15sp1-1
+2019-03-22 18:51:39 - 18:51:39: pe-input-26 15sp1-1
+2019-03-22 18:51:53 - 18:51:53: pe-input-27 15sp1-1
+2019-03-22 18:51:54 - --:--:--: pe-input-28 15sp1-1
+2019-03-22 18:52:06 - 18:52:06: pe-input-30 15sp1-1
+2019-03-22 18:52:25 - 18:52:25: pe-input-31 15sp1-1
+2019-03-22 18:53:09 - 18:53:09: pe-input-32 15sp1-1
+2019-03-22 18:53:15 - 18:53:15: pe-input-33 15sp1-1
+2019-03-22 18:53:15 - --:--:--: pe-input-34 15sp1-1
+2019-03-22 18:54:08 - 18:54:08: pe-input-36 15sp1-1
+2019-03-22 18:54:17 - 18:54:19: pe-input-37 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 - 18:54:48: pe-input-38 15sp1-1 d1 d2
+2019-03-22 18:54:58 - 18:54:58: pe-input-39 15sp1-1 d1 d2
+2019-03-22 18:56:18 - 18:56:19: pe-input-40 15sp1-1 error
+2019-03-22 18:56:19 - 18:56:19: pe-input-41 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-42 15sp1-1 error
+2019-03-22 18:56:19 - --:--:--: pe-input-43 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-44 15sp1-1 error
+2019-03-22 18:56:42 - 18:56:42: pe-input-45 15sp1-1
+2019-03-22 18:56:43 - --:--:--: pe-input-46 15sp1-1
+2019-03-22 18:56:55 - 18:57:27: pe-warn-0 15sp1-1
+2019-03-22 18:57:52 - 18:57:52: pe-input-48 15sp1-1
+2019-03-22 19:00:42 - 19:00:43: pe-input-49 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 - 19:01:31: pe-input-11 15sp1-2
+2019-03-22 19:02:24 - 19:02:24: pe-input-12 15sp1-2
+2019-03-22 19:03:05 - 19:03:05: pe-input-13 15sp1-2 d1 d2
+2019-03-22 19:45:14 - 19:45:16: pe-input-15 15sp1-2 stonith-sbd
+2019-03-22 19:45:16 - 19:45:16: pe-input-16 15sp1-2
+2019-03-22 19:46:15 - 19:46:15: pe-input-18 15sp1-2
+2019-03-22 19:46:15 - 19:46:16: pe-input-19 15sp1-2 stonith-sbd
+.INP: refresh
+Refreshing log data...
+55 transitions, 116 events.
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: # reduce report span
+.INP: timeframe "2019-03-22 15:07:37"
+WARNING: 20: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 20: "timeframe" is accepted as "limit"
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: transition log
+2019-03-22T11:46:15.797222+08:00 15sp1-2 sbd[2770]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:46:15.812786+08:00 15sp1-2 sbd[2774]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+.INP: transition nograph
+INFO: 25: running ptest with history-test/15sp1-2/pengine/pe-input-19.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-19 (19:46:15 - 19:46:16):
+ total 1 actions: 1 Complete
+.INP: transition -1 nograph
+INFO: 26: running ptest with history-test/15sp1-2/pengine/pe-input-18.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-18 (19:46:15 - 19:46:15):
+ total 12 actions: 7 Complete, 1 Skipped, 4 Incomplete
+.INP: transition save 0 _crmsh_regtest
+INFO: 27: transition history-test/15sp1-2/pengine/pe-input-19.bz2 saved to shadow _crmsh_regtest
+.INP: transition log 49
+2019-03-22T11:00:42.614804+08:00 15sp1-1 systemd[1]: Stopped target Timers.
+2019-03-22T11:00:42.615759+08:00 15sp1-1 systemd[1]: Stopped Discard unused blocks once a week.
+2019-03-22T11:00:42.615966+08:00 15sp1-1 systemd[1]: Stopped Scrub btrfs filesystem, verify block checksums.
+2019-03-22T11:00:42.616312+08:00 15sp1-1 systemd[1]: Stopped target Sound Card.
+2019-03-22T11:00:42.616521+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Temporary Directories.
+2019-03-22T11:00:42.616840+08:00 15sp1-1 systemd[1]: Stopped target Multi-User System.
+2019-03-22T11:00:42.617530+08:00 15sp1-1 pacemakerd[1733]: notice: Caught 'Terminated' signal
+2019-03-22T11:00:42.617672+08:00 15sp1-1 pacemakerd[1733]: notice: Shutting down Pacemaker
+2019-03-22T11:00:42.635974+08:00 15sp1-1 systemd[1]: Stopping Pacemaker High Availability Cluster Manager...
+2019-03-22T11:00:42.640402+08:00 15sp1-1 systemd[1]: Stopped target Login Prompts.
+2019-03-22T11:00:42.649788+08:00 15sp1-1 systemd[1]: Stopping Session 1 of user root.
+2019-03-22T11:00:42.656415+08:00 15sp1-1 systemd[1]: Stopping OpenSSH Daemon...
+2019-03-22T11:00:42.659094+08:00 15sp1-1 systemd[1]: Stopped Detect if the system suffers from bsc#1089761.
+2019-03-22T11:00:42.660023+08:00 15sp1-1 systemd[1]: Stopped Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.660434+08:00 15sp1-1 systemd[1]: Stopping Restore /run/initramfs on shutdown...
+2019-03-22T11:00:42.660712+08:00 15sp1-1 systemd[1]: Stopped Do daily mandb update.
+2019-03-22T11:00:42.660980+08:00 15sp1-1 systemd[1]: Stopped Check if mainboard battery is Ok.
+2019-03-22T11:00:42.661239+08:00 15sp1-1 systemd[1]: Stopped Early Kernel Boot Messages.
+2019-03-22T11:00:42.661471+08:00 15sp1-1 systemd[1]: Stopped Apply settings from /etc/sysconfig/keyboard.
+2019-03-22T11:00:42.661722+08:00 15sp1-1 systemd[1]: Closed LVM2 poll daemon socket.
+2019-03-22T11:00:42.661854+08:00 15sp1-1 systemd[1]: Stopped Backup of RPM database.
+2019-03-22T11:00:42.661990+08:00 15sp1-1 systemd[1]: Stopped Backup of /etc/sysconfig.
+2019-03-22T11:00:42.663466+08:00 15sp1-2 systemd[1]: Started Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.673313+08:00 15sp1-1 systemd[1766]: Stopped target Default.
+2019-03-22T11:00:42.673554+08:00 15sp1-1 systemd[1766]: Stopped target Basic System.
+2019-03-22T11:00:42.673738+08:00 15sp1-1 systemd[1766]: Stopped target Sockets.
+2019-03-22T11:00:42.673880+08:00 15sp1-1 systemd[1766]: Closed D-Bus User Message Bus Socket.
+2019-03-22T11:00:42.674004+08:00 15sp1-1 systemd[1766]: Stopped target Paths.
+2019-03-22T11:00:42.674122+08:00 15sp1-1 systemd[1766]: Reached target Shutdown.
+2019-03-22T11:00:42.674236+08:00 15sp1-1 systemd[1766]: Stopped target Timers.
+2019-03-22T11:00:42.674360+08:00 15sp1-1 systemd[1766]: Starting Exit the Session...
+2019-03-22T11:00:42.674478+08:00 15sp1-1 systemd[1]: Stopping User Manager for UID 0...
+2019-03-22T11:00:42.674594+08:00 15sp1-1 systemd[1]: Stopped Balance block groups on a btrfs filesystem.
+2019-03-22T11:00:42.674701+08:00 15sp1-1 systemd[1]: Stopping iSCSI UserSpace I/O driver...
+2019-03-22T11:00:42.674806+08:00 15sp1-1 systemd[1]: Stopping Getty on tty1...
+2019-03-22T11:00:42.674911+08:00 15sp1-1 systemd[1]: Stopping Command Scheduler...
+2019-03-22T11:00:42.675020+08:00 15sp1-1 systemd[1]: Stopped Daily rotation of log files.
+2019-03-22T11:00:42.675126+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Snapper Snapshots.
+2019-03-22T11:00:42.675231+08:00 15sp1-1 systemd[1]: Removed slice system-systemd\x2dhibernate\x2dresume.slice.
+2019-03-22T11:00:42.675345+08:00 15sp1-1 systemd[1]: Stopped iSCSI UserSpace I/O driver.
+2019-03-22T11:00:42.675452+08:00 15sp1-1 systemd[1]: Stopped OpenSSH Daemon.
+2019-03-22T11:00:42.675561+08:00 15sp1-1 systemd[1]: Stopped Session 1 of user root.
+2019-03-22T11:00:42.683003+08:00 15sp1-1 systemd-logind[819]: Session 1 logged out. Waiting for processes to exit.
+2019-03-22T11:00:42.683239+08:00 15sp1-1 systemd[1]: Stopped Getty on tty1.
+2019-03-22T11:00:42.683375+08:00 15sp1-1 systemd[1]: Stopped Restore /run/initramfs on shutdown.
+2019-03-22T11:00:42.683487+08:00 15sp1-1 systemd-logind[819]: Removed session 1.
+2019-03-22T11:00:42.683603+08:00 15sp1-1 systemd[1]: Starting Show Plymouth Reboot Screen...
+2019-03-22T11:00:42.683861+08:00 15sp1-1 systemd[1]: Removed slice system-getty.slice.
+2019-03-22T11:00:42.686592+08:00 15sp1-1 systemd[1]: Received SIGRTMIN+20 from PID 5230 (plymouthd).
+2019-03-22T11:00:42.687482+08:00 15sp1-2 dbus-daemon[768]: [system] Activating service name='org.opensuse.Snapper' requested by ':1.13' (uid=0 pid=1835 comm="/usr/lib/snapper/systemd-helper --timeline ") (using servicehelper)
+2019-03-22T11:00:42.687871+08:00 15sp1-1 cron[1730]: (CRON) INFO (Shutting down)
+2019-03-22T11:00:42.689646+08:00 15sp1-1 systemd[1]: Stopped Command Scheduler.
+2019-03-22T11:00:42.689784+08:00 15sp1-1 systemd[1]: Stopping Postfix Mail Transport Agent...
+2019-03-22T11:00:42.705412+08:00 15sp1-2 dbus-daemon[768]: [system] Successfully activated service 'org.opensuse.Snapper'
+2019-03-22T11:00:42.745173+08:00 15sp1-2 sbd[1847]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.760480+08:00 15sp1-2 sbd[1851]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.765095+08:00 15sp1-1 systemd[1]: Stopped Postfix Mail Transport Agent.
+2019-03-22T11:00:42.765239+08:00 15sp1-1 systemd[1]: Stopped target Host and Network Name Lookups.
+.INP: transition tags 49
+stonith-sbd
+.INP: # reset timeframe
+.INP: timeframe
+WARNING: 31: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 31: "timeframe" is accepted as "limit"
+.INP: session save _crmsh_regtest
+.INP: session load _crmsh_regtest
+.INP: session
+current session: _crmsh_regtest
+.INP: session pack
+.TRY History 2
+.INP: history
+.INP: session load _crmsh_regtest
+.INP: exclude
+corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
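The transcript above exercises the full `crm history` round trip: narrowing the log with `exclude`, inspecting transitions (`transition log`, `transition nograph`, `transition save`, `transition tags`), resetting the window with `limit` (for which `timeframe` is the deprecated alias), and finally `session save`/`load`/`pack`. A condensed, hand-typed recap of the same flow, using only commands that appear in the transcript:

    crm history
      exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
      transition log                     # log excerpt for the latest transition
      transition save 0 _crmsh_regtest   # copy its pe-input into a shadow CIB
      limit                              # reset the timeframe
      session save _crmsh_regtest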
diff --git a/test/testcases/history.post b/test/testcases/history.post
new file mode 100755
index 0000000..b5bb7fc
--- /dev/null
+++ b/test/testcases/history.post
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -rf history-test
diff --git a/test/testcases/history.pre b/test/testcases/history.pre
new file mode 100755
index 0000000..4905f13
--- /dev/null
+++ b/test/testcases/history.pre
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -rf history-test
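By naming convention these hooks bracket the history testcase: the harness runs testcases/&lt;name&gt;.pre before feeding the input and testcases/&lt;name&gt;.post afterwards, and both scrub the `_crmsh_regtest` session plus the unpacked `history-test` report so reruns start clean. A minimal sketch of how a driver can wire such hooks (illustrative only; `run_one` is a made-up name, the real logic lives in evaltest.sh):

    #!/bin/sh
    tc=$1
    [ -x "testcases/$tc.pre" ]  && "testcases/$tc.pre"
    run_one "testcases/$tc"                  # hypothetical: feed input, capture output
    [ -x "testcases/$tc.post" ] && "testcases/$tc.post"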
diff --git a/test/testcases/newfeatures b/test/testcases/newfeatures
new file mode 100644
index 0000000..5723625
--- /dev/null
+++ b/test/testcases/newfeatures
@@ -0,0 +1,44 @@
+session New features
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive p0 Dummy params $p0-state:state=1
+primitive p1 Dummy params \
+ rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 \
+ state=2
+primitive p2 Dummy params @p0-state
+property rule #uname eq node1 stonith-enabled=no
+tag tag1: p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+primitive node1 Dummy
+tag ones l1 p1
+alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ trap_node_states="non-trap" \
+ trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to "192.168.40.9"
+alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ select fencing nodes resources \
+ to 192.168.28.188
+show tag:ones and type:location
+show tag:ones and p1
+show
+_test
+verify
+commit
+.
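Two of the features exercised here are the nvpair id syntax and id references: `params $p0-state:state=1` assigns the explicit id `p0-state` to the nvpair, and `params @p0-state` makes another primitive reuse that nvpair by reference, which the `show` output in newfeatures.exp below confirms. The same pair in isolation (hypothetical ids):

    primitive a Dummy params $shared-state:state=1   # nvpair with explicit id "shared-state"
    primitive b Dummy params @shared-state           # id-ref: b shares a's nvpair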
diff --git a/test/testcases/newfeatures.exp b/test/testcases/newfeatures.exp
new file mode 100644
index 0000000..897f315
--- /dev/null
+++ b/test/testcases/newfeatures.exp
@@ -0,0 +1,81 @@
+.TRY New features
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive p0 Dummy params $p0-state:state=1
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p1 Dummy params rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2
+.INP: primitive p2 Dummy params @p0-state
+.INP: property rule #uname eq node1 stonith-enabled=no
+.INP: tag tag1: p0 p1 p2
+.INP: tag tag2 p0 p1 p2
+.INP: location l1 { p0 p1 p2 } inf: node1
+.INP: primitive node1 Dummy
+.INP: tag ones l1 p1
+.INP: alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" trap_node_states="non-trap" trap_resource_tasks="start,stop,monitor,promote,demote" to "192.168.40.9"
+.INP: alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" select attributes { master-prmStateful test1 } to 192.168.28.188
+.INP: alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh select fencing nodes resources to 192.168.28.188
+.INP: show tag:ones and type:location
+location l1 { p0 p1 p2 } inf: node1
+.INP: show tag:ones and p1
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+.INP: show
+node node1
+primitive node1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p0 Dummy \
+ params state=1 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ params @p0-state \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+tag ones l1 p1
+tag tag1 p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+property cib-bootstrap-options: \
+ rule #uname eq node1 \
+ stonith-enabled=no
+alert notify_10 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ select fencing nodes resources \
+ to 192.168.28.188
+alert notify_9 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false trap_node_states=non-trap trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to 192.168.40.9
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: commit
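A note on the markers in the .exp files: `.TRY` opens a testcase or command, `.INP:` echoes every line fed to the shell, and `.EXT` records each external program crmsh invoked, so the expected output also pins down which tools a command shells out to. That makes dependency checks one-liners, e.g.:

    grep '^\.EXT' testcases/newfeatures.exp | sort -u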
diff --git a/test/testcases/node b/test/testcases/node
new file mode 100644
index 0000000..f0a5fc1
--- /dev/null
+++ b/test/testcases/node
@@ -0,0 +1,14 @@
+node show
+node show node1
+%setenv showobj=node1
+configure primitive p5 Dummy
+configure group g0 p5
+resource maintenance g0
+resource maintenance p5
+-F node maintenance node1
+node ready node1
+node attribute node1 set a1 "1 2 3"
+node attribute node1 show a1
+node attribute node1 delete a1
+node clearstate node1
+
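The attribute subcommands are thin wrappers over crm_attribute; node.exp below records the exact calls on its `.EXT` lines:

    crm_attribute -t nodes -N node1 -n a1 -v '1 2 3'   # node attribute node1 set a1 "1 2 3"
    crm_attribute -G -t nodes -N node1 -n a1           # node attribute node1 show a1
    crm_attribute -D -t nodes -N node1 -n a1           # node attribute node1 delete a1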
diff --git a/test/testcases/node.exp b/test/testcases/node.exp
new file mode 100644
index 0000000..d91c33c
--- /dev/null
+++ b/test/testcases/node.exp
@@ -0,0 +1,204 @@
+.TRY node show
+node1: member
+.TRY node show node1
+node1: member
+.SETENV showobj=node1
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure group g0 p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance g0
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY -F node maintenance node1
+INFO: 'maintenance' attribute already exists in p5. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g0. Remove it? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node ready node1
+.EXT crm_attribute -t nodes -N 'node1' -n maintenance -v 'off'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 set a1 "1 2 3"
+.EXT crm_attribute -t nodes -N 'node1' -n 'a1' -v '1 2 3'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 show a1
+.EXT crm_attribute -G -t nodes -N 'node1' -n 'a1'
+scope=nodes name=a1 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 delete a1
+.EXT crm_attribute -D -t nodes -N 'node1' -n 'a1'
+Deleted nodes attribute: id=nodes-node1-a1 name=a1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node clearstate node1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
diff --git a/test/testcases/options b/test/testcases/options
new file mode 100644
index 0000000..44f331b
--- /dev/null
+++ b/test/testcases/options
@@ -0,0 +1,23 @@
+session Options
+options
+reset
+pager cat
+editor vi
+show
+check-frequency never
+check-mode nosuchever
+colorscheme normal,yellow,cyan,red,green,magenta
+colorscheme normal,yellow,cyan,red
+pager nosuchprogram
+skill-level operator
+skill-level joe
+skill-level expert
+output plain
+output misplain
+wait true
+wait off
+wait happy
+show
+save
+.
+options show
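The case alternates valid settings with ones that must be rejected (`check-mode nosuchever`, `skill-level joe`, `output misplain`, `wait happy`), then persists the survivors with `save`; the trailing top-level `options show` verifies that a fresh invocation reads the same values back. To double-check persistence by hand (the config path is an assumption and varies across crmsh versions):

    crm options show                        # new process, should match the saved set
    cat ~/.config/crm/crm.conf 2>/dev/null  # commonly where user preferences land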
diff --git a/test/testcases/options.exp b/test/testcases/options.exp
new file mode 100644
index 0000000..f13d308
--- /dev/null
+++ b/test/testcases/options.exp
@@ -0,0 +1,64 @@
+.TRY Options
+.INP: options
+.INP: reset
+.INP: pager cat
+.INP: editor vi
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "color"
+colorscheme "yellow,normal,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "always"
+check-mode "strict"
+wait "no"
+add-quotes "yes"
+manage-children "ask"
+.INP: check-frequency never
+.INP: check-mode nosuchever
+ERROR: nosuchever not valid (choose one from strict,relaxed)
+.INP: colorscheme normal,yellow,cyan,red,green,magenta
+.INP: colorscheme normal,yellow,cyan,red
+ERROR: bad color scheme: normal,yellow,cyan,red
+.INP: pager nosuchprogram
+ERROR: nosuchprogram does not exist or is not a program
+.INP: skill-level operator
+.INP: skill-level joe
+ERROR: joe not valid (choose one from operator,administrator,expert)
+.INP: skill-level expert
+.INP: output plain
+.INP: output misplain
+ERROR: misplain not valid (choose one from plain,color,uppercase)
+.INP: wait true
+.INP: wait off
+.INP: wait happy
+ERROR: happy not valid (yes or no are valid)
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
+.INP: save
+.TRY options show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
diff --git a/test/testcases/ra b/test/testcases/ra
new file mode 100644
index 0000000..bd44a3a
--- /dev/null
+++ b/test/testcases/ra
@@ -0,0 +1,7 @@
+session RA interface
+ra
+providers IPaddr
+providers Dummy
+info ocf:pacemaker:Dummy
+info stonith:external/ssh
+.
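The same queries can be run directly from the command line:

    crm ra providers IPaddr           # providers that ship an IPaddr agent
    crm ra info ocf:pacemaker:Dummy   # full agent metadata, as reproduced in ra.exp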
diff --git a/test/testcases/ra.exp b/test/testcases/ra.exp
new file mode 100644
index 0000000..5d15734
--- /dev/null
+++ b/test/testcases/ra.exp
@@ -0,0 +1,150 @@
+.TRY RA interface
+.INP: ra
+.INP: providers IPaddr
+
+heartbeat
+.INP: providers Dummy
+heartbeat pacemaker
+.INP: info ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+Example stateless resource agent (ocf:pacemaker:Dummy)
+
+This is a dummy OCF resource agent. It does absolutely nothing except keep track
+of whether it is running or not, and can be configured so that actions fail or
+take a long time. Its purpose is primarily for testing, and to serve as a
+template for resource agent writers.
+
+Parameters (*: required, []: default):
+
+state (string, [state-file]): State file
+ Location to store the resource state in.
+
+passwd (string): Password
+ Fake password field
+
+fake (string, [dummy]):
+ Fake attribute that can be changed to cause an agent reload
+
+op_sleep (string, [0]): Operation sleep duration in seconds.
+ Number of seconds to sleep during operations. This can be used to test how
+ the cluster reacts to operation timeouts.
+
+fail_start_on (string): Report bogus start failure on specified host
+ Start, migrate_from, and reload-agent actions will return failure if running on
+ the host specified here, but the resource will run successfully anyway (future
+ monitor calls will find it running). This can be used to test on-fail=ignore.
+
+envfile (string): Environment dump file
+ If this is set, the environment will be dumped to this file for every call.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20s
+ stop timeout=20s
+ monitor timeout=20s interval=10s depth=0
+ reload timeout=20s
+ reload-agent timeout=20s
+ migrate_to timeout=20s
+ migrate_from timeout=20s
+.INP: info stonith:external/ssh
+.EXT crm_resource --show-metadata stonith:external/ssh
+.EXT stonithd metadata
+ssh STONITH device (stonith:external/ssh)
+
+ssh-based host reset
+Fine for testing, but not suitable for production!
+Only reboot action supported, no poweroff, and, surprisingly enough, no poweron.
+
+Parameters (*: required, []: default):
+
+hostlist* (string): Hostlist
+ The list of hosts that the STONITH device controls
+
+livedangerously (enum): Live Dangerously!!
+ Set to "yes" if you want to risk your system's integrity.
+ Of course, since this plugin isn't for production, using it
+ in production at all is a bad idea. On the other hand,
+ setting this parameter to yes makes it an even worse idea.
+ Viva la Vida Loca!
+
+pcmk_host_argument (string, [port]): Advanced use only: An alternate parameter to supply instead of 'port'
+ some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of none can be used to tell the cluster not to supply any additional parameters.
+
+pcmk_host_map (string): A mapping of host names to ports numbers for devices that do not support host names.
+ Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2
+
+pcmk_host_list (string): Eg. node1,node2,node3
+ A list of machines controlled by this device (Optional unless pcmk_host_list=static-list)
+
+pcmk_host_check (string, [dynamic-list]): How to determine which machines are controlled by the device.
+ Allowed values: dynamic-list (query the device via the 'list' command), static-list (check the pcmk_host_list attribute), status (query the device via the 'status' command), none (assume every device can fence every machine)
+
+pcmk_delay_max (time, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
+
+pcmk_delay_base (string, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value.This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value per target.
+
+pcmk_action_limit (integer, [1]): The maximum number of actions can be performed in parallel on this device
+ Cluster property concurrent-fencing=true needs to be configured first.Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.
+
+pcmk_reboot_action (string, [reboot]): Advanced use only: An alternate command to run instead of 'reboot'
+ Some devices do not support the standard commands or may provide additional ones.\nUse this to specify an alternate, device-specific, command that implements the 'reboot' action.
+
+pcmk_reboot_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
+
+pcmk_reboot_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up.
+
+pcmk_off_action (string, [off]): Advanced use only: An alternate command to run instead of 'off'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'off' action.
+
+pcmk_off_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'off' actions.
+
+pcmk_off_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'off' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up.
+
+pcmk_on_action (string, [on]): Advanced use only: An alternate command to run instead of 'on'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'on' action.
+
+pcmk_on_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'on' actions.
+
+pcmk_on_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'on' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up.
+
+pcmk_list_action (string, [list]): Advanced use only: An alternate command to run instead of 'list'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'list' action.
+
+pcmk_list_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'list' actions.
+
+pcmk_list_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'list' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up.
+
+pcmk_monitor_action (string, [monitor]): Advanced use only: An alternate command to run instead of 'monitor'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
+
+pcmk_monitor_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.\nUse this to specify an alternate, device-specific, timeout for 'monitor' actions.
+
+pcmk_monitor_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up.
+
+pcmk_status_action (string, [status]): Advanced use only: An alternate command to run instead of 'status'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'status' action.
+
+pcmk_status_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'status' actions.
+
+pcmk_status_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'status' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20
+ stop timeout=15
+ status timeout=20
+ monitor timeout=20 interval=3600
diff --git a/test/testcases/ra.filter b/test/testcases/ra.filter
new file mode 100755
index 0000000..bc57a83
--- /dev/null
+++ b/test/testcases/ra.filter
@@ -0,0 +1,17 @@
+#!/usr/bin/awk -f
+# reduce the providers list to heartbeat and pacemaker
+# (prevents other providers creeping in)
+function reduce(a) {
+ a["heartbeat"]=1; a["pacemaker"]=1;
+ s="";
+ for( i=1; i<=NF; i++ )
+ if( $i in a )
+ s=s" "$i;
+ return substr(s,2);
+}
+n==1 { n=0; print reduce(a); next; }
+/providers IPaddr/ { n=1; }
+/providers Dummy/ { n=1; }
+/^ssh STONITH/ { sub(" external",""); }
+/^state \(string, \[(.*)\]\):/ { gsub(/\[.*\]/, "[state-file]") }
+{ print }
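The filter is piped over the captured output before it is compared with ra.exp, keeping the test stable no matter which extra providers a build installs: the two `/providers .../` rules arm `n`, so the very next line (the provider list) is rewritten by reduce() to at most `heartbeat pacemaker`. Schematically (the output path is illustrative):

    testcases/ra.filter < output/ra.out | diff - testcases/ra.exp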
diff --git a/test/testcases/resource b/test/testcases/resource
new file mode 100644
index 0000000..8fad9b6
--- /dev/null
+++ b/test/testcases/resource
@@ -0,0 +1,84 @@
+resource status p0
+%setenv showobj=p3
+resource start p3
+resource stop p3
+%setenv showobj=c1
+resource manage c1
+resource unmanage c1
+%setenv showobj=p2
+resource maintenance p2 on
+resource maintenance p2 off
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1 force
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=p0
+resource param p0 set a0 "1 2 3"
+resource param p0 show a0
+resource param p0 delete a0
+resource meta p0 set m0 123
+resource meta p0 show m0
+resource meta p0 delete m0
+resource trace p0 probe
+resource trace p0 start
+resource trace p0 stop
+resource untrace p0 probe
+resource untrace p0 start
+resource untrace p0 stop
+configure group g p0 p3
+options manage-children never
+resource start g
+resource start p0
+resource stop g
+configure clone cg g
+options manage-children always
+resource start g
+resource stop g
+resource start cg
+resource stop p0
+resource start cg
+resource stop cg
+resource stop p3
+%setenv showobj=
+configure rename p3 p4
+configure primitive p3 Dummy
+resource stop p3
+resource start p3
+resource cleanup
+resource cleanup p3
+resource cleanup p3 node1
+resource cleanup force
+resource cleanup p3 force
+resource cleanup p3 node1 force
+resource refresh
+resource refresh p3
+resource refresh p3 node1
+resource refresh force
+resource refresh p3 force
+resource refresh p3 node1 force
+resource stop p3
+configure rm cg
+configure ms msg g
+resource scores
+%setenv showobj=
+configure primitive p5 Dummy
+configure group g1 p5
+resource manage p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=p5
+-F resource unmanage p5
+%setenv showobj=p5
+-F resource maintenance g1
+resource start p5
+%setenv showobj=g1
+-F resource manage g1
+resource start p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=g1
+-F resource maintenance g1
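Two driver directives do the heavy lifting in this case: `%setenv showobj=<id>` makes the harness dump `configure show xml <id>` after every subsequent command (that is where all the CIB snapshots in resource.exp come from), and a leading `-F` runs the command with force, auto-answering prompts such as the `Remove it? [YES]` questions seen in node.exp. Annotated:

    %setenv showobj=p5               # after each command, dump `show xml p5`
    -F resource maintenance p5 on    # forced: prompts answered automatically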
diff --git a/test/testcases/resource.exp b/test/testcases/resource.exp
new file mode 100644
index 0000000..c03aae7
--- /dev/null
+++ b/test/testcases/resource.exp
@@ -0,0 +1,1450 @@
+.TRY resource status p0
+.EXT crm_resource --locate --resource 'p0'
+resource p0 is NOT running
+.SETENV showobj=p3
+.TRY resource start p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=c1
+.TRY resource manage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource unmanage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p2
+.TRY resource maintenance p2 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p2 off
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="false"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1'
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1 force
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1' --force
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=p0
+.TRY resource param p0 set a0 "1 2 3"
+.EXT crm_resource --resource 'p0' --set-parameter 'a0' --parameter-value '1 2 3'
+Set 'p0' option: id=p0-instance_attributes-a0 set=p0-instance_attributes name=a0 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 show a0
+.EXT crm_resource --resource 'p0' --get-parameter 'a0'
+1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 delete a0
+.EXT crm_resource --resource 'p0' --delete-parameter 'a0'
+Deleted 'p0' option: id=p0-instance_attributes-a0 name=a0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 set m0 123
+.EXT crm_resource --meta --resource 'p0' --set-parameter 'm0' --parameter-value '123'
+Set 'p0' option: id=p0-meta_attributes-m0 set=p0-meta_attributes name=m0 value=123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 show m0
+.EXT crm_resource --meta --resource 'p0' --get-parameter 'm0'
+123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 delete m0
+.EXT crm_resource --meta --resource 'p0' --delete-parameter 'm0'
+Deleted 'p0' option: id=p0-meta_attributes-m0 name=m0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 probe
+INFO: Trace for p0:monitor is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace non-monitor operations
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 start
+INFO: Trace for p0:start is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the start operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 stop
+INFO: Trace for p0:stop is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the stop operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 probe
+INFO: Stop tracing p0 for operation monitor
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 start
+INFO: Stop tracing p0 for operation start
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 stop
+INFO: Stop tracing p0 for operation stop
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure group g p0 p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children never
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure clone cg g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children always
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY configure rename p3 p4
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure primitive p3 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY resource stop p3
+.TRY resource start p3
+.TRY resource cleanup
+.EXT crm_resource --cleanup
+.TRY resource cleanup p3
+.EXT crm_resource --cleanup --resource p3
+.TRY resource cleanup p3 node1
+.EXT crm_resource --cleanup --resource p3 --node node1
+.TRY resource cleanup force
+.EXT crm_resource --cleanup --force
+.TRY resource cleanup p3 force
+.EXT crm_resource --cleanup --resource p3 --force
+.TRY resource cleanup p3 node1 force
+.EXT crm_resource --cleanup --resource p3 --node node1 --force
+.TRY resource refresh
+.EXT crm_resource --refresh
+.TRY resource refresh p3
+.EXT crm_resource --refresh --resource p3
+.TRY resource refresh p3 node1
+.EXT crm_resource --refresh --resource p3 --node node1
+.TRY resource refresh force
+.EXT crm_resource --refresh --force
+.TRY resource refresh p3 force
+.EXT crm_resource --refresh --resource p3 --force
+.TRY resource refresh p3 node1 force
+.EXT crm_resource --refresh --resource p3 --node node1 --force
+.TRY resource stop p3
+.TRY configure rm cg
+WARNING: This command 'rm' is deprecated, please use 'delete'
+INFO: "rm" is accepted as "delete"
+.TRY configure ms msg g
+WARNING: "ms" is deprecated. Please use "clone msg g meta promotable=true"
+.TRY resource scores
+.EXT crm_simulate -sUL
+2 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Node node1: UNCLEAN (offline)
+
+ st (stonith:null): Stopped
+ Stopped: [ node1 ]
+ Stopped: [ node1 ]
+ p3 (ocf::heartbeat:Dummy): Stopped ( disabled )
+ Stopped: [ node1 ]
+
+Original: node1 capacity:
+pcmk__primitive_assign: st allocation score on node1: 0
+pcmk__clone_assign: c1 allocation score on node1: 0
+pcmk__clone_assign: p1:0 allocation score on node1: 0
+pcmk__primitive_assign: p1:0 allocation score on node1: -INFINITY
+pcmk__clone_assign: m1 allocation score on node1: 0
+pcmk__clone_assign: p2:0 allocation score on node1: 0
+pcmk__primitive_assign: p2:0 allocation score on node1: -INFINITY
+p2:0 promotion score on none: 0
+pcmk__primitive_assign: p3 allocation score on node1: -INFINITY
+pcmk__clone_assign: msg allocation score on node1: 0
+pcmk__clone_assign: g:0 allocation score on node1: 0
+pcmk__clone_assign: p0:0 allocation score on node1: 0
+pcmk__clone_assign: p4:0 allocation score on node1: 0
+pcmk__group_assign: g:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p4:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p4:0 allocation score on node1: -INFINITY
+g:0 promotion score on none: 0
+Remaining: node1 capacity:
+
+.SETENV showobj=
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure group g1 p5
+.TRY resource manage p5
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes-0">
+ <nvpair id="p5-meta_attributes-0-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource unmanage p5
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance g1
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource manage g1
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ <nvpair id="p5-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource maintenance g1
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
diff --git a/test/testcases/rset b/test/testcases/rset
new file mode 100644
index 0000000..798e392
--- /dev/null
+++ b/test/testcases/rset
@@ -0,0 +1,21 @@
+show Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+show o1 c1 c2 c3
+delete d4
+show o1 c1 c2 c3
+_test
+verify
+.
diff --git a/test/testcases/rset-xml b/test/testcases/rset-xml
new file mode 100644
index 0000000..842d4df
--- /dev/null
+++ b/test/testcases/rset-xml
@@ -0,0 +1,19 @@
+showxml Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+delete d4
+_test
+verify
+.
diff --git a/test/testcases/rset-xml.exp b/test/testcases/rset-xml.exp
new file mode 100644
index 0000000..51c431a
--- /dev/null
+++ b/test/testcases/rset-xml.exp
@@ -0,0 +1,53 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" timeout="20" interval="3600" id="st-monitor-3600"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d3" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d5-stop-0s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_order id="o1" kind="Serialize" first="d1" then="d3"/>
+ <rsc_colocation id="c1" score="INFINITY">
+ <resource_set sequential="false" id="c1-1">
+ <resource_ref id="d1"/>
+ <resource_ref id="d3"/>
+ </resource_set>
+ </rsc_colocation>
+ <rsc_colocation id="c2" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ <rsc_colocation id="c3" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ </constraints>
+ </configuration>
+</cib>
diff --git a/test/testcases/rset.exp b/test/testcases/rset.exp
new file mode 100644
index 0000000..79b03f4
--- /dev/null
+++ b/test/testcases/rset.exp
@@ -0,0 +1,66 @@
+.TRY Resource sets
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: primitive st stonith:ssh params hostlist='node1' op start timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: order o1 Serialize: d1 d2 ( d3 d4 )
+.INP: colocation c1 inf: d4 ( d1 d2 d3 )
+.INP: colocation c2 inf: d1 d2 d3 d4
+.INP: colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+.INP: delete d2
+INFO: 16: constraint order:o1 updated
+INFO: 16: constraint colocation:c1 updated
+INFO: 16: constraint colocation:c2 updated
+INFO: 16: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: d4 ( d1 d3 )
+colocation c2 inf: d1 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 )
+order o1 Serialize: d1 ( d3 d4 )
+.INP: delete d4
+INFO: 18: constraint order:o1 updated
+INFO: 18: constraint colocation:c1 updated
+INFO: 18: constraint colocation:c2 updated
+INFO: 18: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ op start timeout=60s interval=0s \
+ op monitor timeout=20 interval=3600 \
+ op stop timeout=15 interval=0s
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: commit
diff --git a/test/testcases/scripts b/test/testcases/scripts
new file mode 100644
index 0000000..b89d75d
--- /dev/null
+++ b/test/testcases/scripts
@@ -0,0 +1,14 @@
+session Cluster scripts
+script
+list
+list all
+list names
+list names all
+list all names
+list bogus
+show mailto
+verify mailto id=foo email=test@example.com subject=hello
+run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+json '["show", "mailto"]'
+json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+.
diff --git a/test/testcases/scripts.exp b/test/testcases/scripts.exp
new file mode 100644
index 0000000..ca086c9
--- /dev/null
+++ b/test/testcases/scripts.exp
@@ -0,0 +1,305 @@
+.TRY Cluster scripts
+.INP: script
+.INP: list
+.EXT crm_resource --show-metadata ocf:heartbeat:apache
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr2
+.EXT crm_resource --show-metadata ocf:heartbeat:Filesystem
+.EXT crm_resource --show-metadata ocf:heartbeat:mysql
+.EXT crm_resource --show-metadata systemd:cryptctl-server
+.EXT crm_resource --show-metadata ocf:heartbeat:db2
+.EXT crm_resource --show-metadata ocf:heartbeat:exportfs
+.EXT crm_resource --show-metadata systemd:haproxy
+.EXT crm_resource --show-metadata ocf:heartbeat:LVM
+.EXT crm_resource --show-metadata ocf:heartbeat:MailTo
+.EXT crm_resource --show-metadata ocf:heartbeat:nginx
+.EXT crm_resource --show-metadata ocf:heartbeat:Raid1
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list all
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Script:
+
+check-uptime Check uptime of nodes
+gfs2-base GFS2 File System Base (Cloned)
+lvm Controls the availability of an LVM Volume Group
+raid1 Manages Linux software RAID (MD) devices on shared storage
+sapdb SAP Database Instance
+sapinstance SAP Instance
+sbd-device Create SBD Device
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list names
+apache
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+haproxy
+health
+libvirt
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sbd
+virtual-ip
+vmware
+.INP: list names all
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list all names
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list bogus
+ERROR: 7: script.list: Unexpected argument 'bogus': expected [all|names]
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("Unexpected argument '%s': expected [all|names]" % (arg))
+ raise ValueError(msg)
+ValueError: Unexpected argument 'bogus': expected [all|names]
+.INP: show mailto
+mailto (Basic)
+E-Mail
+
+Notifies recipient by e-mail in the event of a resource takeover.
+
+1. Notifies recipients by email in the event of resource takeover
+
+ id (required) (unique)
+ Identifier for the cluster resource
+ email (required)
+ Email address
+ subject
+ Subject
+
+
+.INP: verify mailto id=foo email=test@example.com subject=hello
+1. Ensure mail package is installed
+
+ mailx
+
+2. Configure cluster resources
+
+ primitive foo ocf:heartbeat:MailTo
+ email="test@example.com"
+ subject="hello"
+ op start timeout="10"
+ op stop timeout="10"
+ op monitor interval="10" timeout="10"
+
+ clone c-foo foo
+
+.INP: run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+INFO: 10: E-Mail
+INFO: 10: Nodes: node1
+** all - #!/usr/bin/env python3
+import crm_script
+import crm_init
+
+crm_init.install_packages(['mailx'])
+crm_script.exit_ok(True)
+
+INFO: 10: Ensure mail package is installed
+** localhost - temporary file <<END
+primitive foo ocf:heartbeat:MailTo email="test@example.com" subject="hello" op start timeout="10" op stop timeout="10" op monitor interval="10" timeout="10"
+clone c-foo foo
+
+END
+
+** localhost - crm --wait --no configure load update <<temporary file>>
+INFO: 10: Configure cluster resources
+.INP: json '["show", "mailto"]'
+{"category": "basic", "longdesc": "Notifies recipient by e-mail in the event of a resource takeover.", "name": "mailto", "shortdesc": "E-Mail", "steps": [{"longdesc": " This is a resource agent for MailTo. It sends email to a sysadmin\nwhenever a takeover occurs.", "parameters": [{"advanced": false, "longdesc": "", "name": "id", "required": true, "shortdesc": "Identifier for the cluster resource", "type": "resource", "unique": true}, {"advanced": false, "example": "", "longdesc": " The email address of sysadmin.", "name": "email", "required": true, "shortdesc": "Email address", "type": "email", "unique": false}, {"advanced": false, "example": "Resource Group", "longdesc": " The subject of the email.", "name": "subject", "required": false, "shortdesc": "Subject", "type": "string", "unique": false}], "required": true, "shortdesc": "Notifies recipients by email in the event of resource takeover"}]}
+.INP: json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+{"longdesc": "", "name": "install", "nodes": "", "shortdesc": "Ensure mail package is installed", "text": "mailx"}
+{"longdesc": "", "name": "cib", "nodes": "", "shortdesc": "Configure cluster resources", "text": "primitive foo ocf:heartbeat:MailTo\n\temail=\"test@example.com\"\n\tsubject=\"hello\"\n\top start timeout=\"10\"\n\top stop timeout=\"10\"\n\top monitor interval=\"10\" timeout=\"10\"\n\nclone c-foo foo"}
diff --git a/test/testcases/scripts.filter b/test/testcases/scripts.filter
new file mode 100755
index 0000000..05e098a
--- /dev/null
+++ b/test/testcases/scripts.filter
@@ -0,0 +1,5 @@
+#!/usr/bin/awk -f
+# replace the random /tmp/crm-tmp-* path in "configure load update" lines with <<temporary file>>
+/\*\* localhost - crm --wait --no configure load update (\/tmp\/crm-tmp-.+)/ { gsub(/.*/, "<<temporary file>>", $NF) }
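+# print every line (including any line rewritten above)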
+{ print }
diff --git a/test/testcases/shadow b/test/testcases/shadow
new file mode 100644
index 0000000..3bfd389
--- /dev/null
+++ b/test/testcases/shadow
@@ -0,0 +1,10 @@
+filesession Shadow CIB management
+cib
+new regtest force
+reset regtest
+use regtest
+commit regtest
+delete regtest
+use
+delete regtest
+.
diff --git a/test/testcases/shadow.exp b/test/testcases/shadow.exp
new file mode 100644
index 0000000..f5ec084
--- /dev/null
+++ b/test/testcases/shadow.exp
@@ -0,0 +1,24 @@
+.TRY Shadow CIB management
+.INP: cib
+.INP: new regtest force
+.EXT >/dev/null </dev/null crm_shadow -b -c 'regtest' --force
+INFO: 2: cib.new: regtest shadow CIB created
+.INP: reset regtest
+.EXT >/dev/null </dev/null crm_shadow -b -r 'regtest'
+INFO: 3: cib.reset: copied live CIB to regtest
+.INP: use regtest
+.INP: commit regtest
+.EXT >/dev/null </dev/null crm_shadow -b -C 'regtest' --force
+INFO: 5: cib.commit: committed 'regtest' shadow CIB to the cluster
+.INP: delete regtest
+ERROR: 6: cib.delete: regtest shadow CIB is in use
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s shadow CIB is in use" % name)
+ raise ValueError(msg)
+ValueError: regtest shadow CIB is in use
+.INP: use
+.INP: delete regtest
+.EXT >/dev/null </dev/null crm_shadow -b -D 'regtest' --force
+INFO: 8: cib.delete: regtest shadow CIB deleted
diff --git a/test/testcases/xmlonly.sh b/test/testcases/xmlonly.sh
new file mode 100755
index 0000000..15e6427
--- /dev/null
+++ b/test/testcases/xmlonly.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+#
+# extract the xml cib
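+# (reads a crm test transcript on stdin and prints only the <?xml ... </cib> range)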
+#
+sed -n '/^<?xml/,/^<\/cib>/p'
diff --git a/test/unittests/__init__.py b/test/unittests/__init__.py
new file mode 100644
index 0000000..18f2638
--- /dev/null
+++ b/test/unittests/__init__.py
@@ -0,0 +1,67 @@
+from __future__ import unicode_literals
+import os
+import sys
+
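+# Tolerate a failed top-level import here; the explicit module imports
+# below will still raise if crmsh is genuinely unavailable.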
+try:
+ import crmsh
+except ImportError:
+ pass
+
+from crmsh import config
+from crmsh import options
+config.core.debug = True
+options.regression_tests = True
+_here = os.path.dirname(__file__)
+config.path.sharedir = os.path.join(_here, "../../doc")
+config.path.crm_dtd_dir = os.path.join(_here, "schemas")
+
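+# CIB_file makes CIB access file-based, so the tests never touch a live cluster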
+os.environ["CIB_file"] = "test"
+
+
+# install a basic CIB
+from crmsh import cibconfig
+
+_CIB = """
+<cib epoch="0" num_updates="0" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Mon Mar 3 23:58:36 2014" update-origin="ha-one" update-client="crmd" update-user="hacluster" crm_feature_set="3.0.9" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ <nvpair name="no-quorum-policy" value="ignore" id="cib-bootstrap-options-no-quorum-policy"/>
+ <nvpair name="dc-version" value="1.1.11+git20140221.0b7d85a-115.1-1.1.11+git20140221.0b7d85a" id="cib-bootstrap-options-dc-version"/>
+ <nvpair name="cluster-infrastructure" value="corosync" id="cib-bootstrap-options-cluster-infrastructure"/>
+ <nvpair name="symmetric-cluster" value="true" id="cib-bootstrap-options-symmetric-cluster"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="ha-one" uname="ha-one"/>
+ <node id="ha-two" uname="ha-two"/>
+ <node id="ha-three" uname="ha-three"/>
+ </nodes>
+ <resources>
+ </resources>
+ <constraints>
+ </constraints>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <nvpair name="migration-threshold" value="0" id="rsc-options-migration-threshold"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="op-options">
+ <nvpair name="timeout" value="200" id="op-options-timeout"/>
+ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ </status>
+</cib>
+"""
+
+cibconfig.cib_factory.initialize(cib=_CIB)
+
diff --git a/test/unittests/bug-862577_corosync.conf b/test/unittests/bug-862577_corosync.conf
new file mode 100644
index 0000000..09b1225
--- /dev/null
+++ b/test/unittests/bug-862577_corosync.conf
@@ -0,0 +1,51 @@
+# Please read the corosync.conf.5 manual page
+
+service {
+ ver: 1
+ name: pacemaker
+}
+totem {
+ version: 2
+ secauth: off
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+
+# Following are old corosync 1.4.x defaults from SLES
+# token: 5000
+# token_retransmits_before_loss_const: 10
+# join: 60
+# consensus: 6000
+# vsftype: none
+# max_messages: 20
+# threads: 0
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.122.2.13
+ mcastaddr: 239.91.185.71
+ mcastport: 5405
+ ttl: 1
+ }
+}
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 2
+}
diff --git a/test/unittests/corosync.conf.1 b/test/unittests/corosync.conf.1
new file mode 100644
index 0000000..7b3abed
--- /dev/null
+++ b/test/unittests/corosync.conf.1
@@ -0,0 +1,81 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ # crypto_cipher and crypto_hash: Used for mutual node authentication.
+ # If you choose to enable this, then do remember to create a shared
+ # secret with "corosync-keygen".
+	# Enabling crypto_cipher also requires enabling crypto_hash.
+ crypto_cipher: none
+ crypto_hash: none
+
+ # interface: define at least one interface to communicate
+ # over. If you define more than one interface stanza, you must
+ # also set rrp_mode.
+ interface {
+ # Rings must be consecutively numbered, starting at 0.
+ ringnumber: 0
+ # This is normally the *network* address of the
+ # interface to bind to. This ensures that you can use
+ # identical instances of this configuration file
+ # across all your cluster nodes, without having to
+ # modify this option.
+ bindnetaddr: 192.168.1.0
+ # However, if you have multiple physical network
+ # interfaces configured for the same subnet, then the
+ # network address alone is not sufficient to identify
+ # the interface Corosync should bind to. In that case,
+ # configure the *host* address of the interface
+ # instead:
+ # bindnetaddr: 192.168.1.1
+ # When selecting a multicast address, consider RFC
+ # 2365 (which, among other things, specifies that
+ # 239.255.x.x addresses are left to the discretion of
+ # the network administrator). Do not reuse multicast
+ # addresses across multiple Corosync clusters sharing
+ # the same network.
+ mcastaddr: 239.255.1.1
+ # Corosync uses the port you specify here for UDP
+ # messaging, and also the immediately preceding
+ # port. Thus if you set this to 5405, Corosync sends
+ # messages over UDP ports 5405 and 5404.
+ mcastport: 5405
+ # Time-to-live for cluster communication packets. The
+ # number of hops (routers) that this ring will allow
+ # itself to pass. Note that multicast routing must be
+ # specifically enabled on most network routers.
+ ttl: 1
+ }
+}
+
+logging {
+ # Log the source file and line where messages are being
+ # generated. When in doubt, leave off. Potentially useful for
+ # debugging.
+ fileline: off
+ # Log to standard error. When in doubt, set to no. Useful when
+ # running in the foreground (when invoking "corosync -f")
+ to_stderr: no
+ # Log to a log file. When set to "no", the "logfile" option
+ # must not be set.
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ # Log to the system log daemon. When in doubt, set to yes.
+ to_syslog: yes
+ # Log debug messages (very verbose). When in doubt, leave off.
+ debug: off
+ # Log messages with time stamps. When in doubt, set to on
+ # (unless you are only logging to syslog, where double
+ # timestamps can be annoying).
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ #provider: corosync_votequorum
+}
diff --git a/test/unittests/corosync.conf.2 b/test/unittests/corosync.conf.2
new file mode 100644
index 0000000..0438451
--- /dev/null
+++ b/test/unittests/corosync.conf.2
@@ -0,0 +1,58 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.16.35.0
+ mcastport: 5405
+ ttl: 1
+ }
+ transport: udpu
+}
+
+logging {
+ fileline: off
+ to_logfile: yes
+ to_syslog: yes
+ logfile: /var/log/cluster/corosync.log
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.16.35.101
+ nodeid: 1
+ }
+
+ node {
+ ring0_addr: 10.16.35.102
+ nodeid: 2
+ }
+
+ node {
+ ring0_addr: 10.16.35.103
+ }
+
+ node {
+ ring0_addr: 10.16.35.104
+ }
+
+ node {
+ ring0_addr: 10.16.35.105
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
diff --git a/test/unittests/corosync.conf.3 b/test/unittests/corosync.conf.3
new file mode 100644
index 0000000..2cc001f
--- /dev/null
+++ b/test/unittests/corosync.conf.3
@@ -0,0 +1,68 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+ secauth: on
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ token: 5000
+ token_retransmits_before_loss_const: 10
+ join: 60
+ consensus: 6000
+ max_messages: 20
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.67.16.0
+ mcastaddr: 239.23.255.56
+ mcastport: 5405
+ ttl: 1
+ }
+
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 0
+ two_node: 0
+ device {
+ votes: 1
+ model: net
+ net {
+ tls: on
+ host: 10.10.10.3
+ port: 5403
+ algorithm: ffsplit
+ tie_breaker: lowest
+ }
+
+ }
+
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.67.18.221
+ nodeid: 172167901
+ }
+
+}
+
diff --git a/test/unittests/pacemaker.log b/test/unittests/pacemaker.log
new file mode 100644
index 0000000..1da52a6
--- /dev/null
+++ b/test/unittests/pacemaker.log
@@ -0,0 +1,923 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: Detected an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: Reading configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: Starting Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: server name: pacemakerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry 58f6784c-39df-4fbe-90df-d462a893c0d4/0x55b94329e2e0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1943 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1944 for process pacemaker-fenced
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1945 for process pacemaker-execd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1946 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1947 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1948 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Starting mainloop
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: server name: lrmd
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: Starting
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (qb_ipcs_us_publish) info: server name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: Starting pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: /var/lib/pacemaker/cib/cib.xml not found: No such file or directory
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.xml (digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Primary configuration corrupt or unusable, trying backups in /var/lib/pacemaker/cib
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Continuing with an empty configuration.
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (validate_with_relaxng) info: Creating RNG parser context
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry ce9f3668-a138-4e36-aec8-124d76e0e8b8/0x5649957b59c0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (main) info: Starting up
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (main) info: CRM Git Version: 2.0.1+20190304.9e909a5bd-1.4 (2.0.1+20190304.9e909a5bd)
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_STARTUP received in state S_STARTING from crmd_init
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (startCib) info: CIB Initialization completed successfully
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 6c579ba7-433c-4d00-88f8-a4a9534cd017/0x56042ff25eb0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_ro
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_rw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_shm
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_init) info: Starting pacemaker-based mainloop
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-0.raw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.0.0 of the CIB to disk (digest: 48469f360effdb63efdbbf08822875d8)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.glQmxd (digest: /var/lib/pacemaker/cib/cib.OJiM5q)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: CIB connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry 97d13205-d013-44ab-bd52-01d8ec4132f7/0x55995aef1580 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Cluster connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_erase_attrs) info: Clearing transient attributes from CIB | xpath=//node_state[@uname='15sp1-1']/transient_attributes
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (qb_ipcs_us_publish) info: server name: attrd
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Accepting attribute updates
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-1]: (null) -> 2 from 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_start_election_if_needed) info: Starting an election to determine the writer
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (election_check) info: election-attrd won by local node
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_declare_winner) notice: Recorded local node as attribute writer (was unset)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 1 private change for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes to all (origin=local/attrd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 0e996957-89f6-4cd2-af8f-271088c53399/0x558f48e15840 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes: OK (rc=0, origin=15sp1-1/attrd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now in unknown state
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (setup_cib) info: Watching for stonith topology changes
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (qb_ipcs_us_publish) info: server name: stonith-ng
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (main) info: Starting pacemaker-fenced mainloop
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (init_cib_cache_cb) info: Updating device list from the cib: init
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (cib_devices_update) info: Updating devices to version 0.0.0
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (unpack_nodes) info: Creating a fake local node
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_ha_control) info: Connected to the cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (lrmd_ipc_connect) info: Connecting to executor
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_lrm_control) info: Connection to the executor established
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, no membership data (0000000000100000)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now member (was in unknown state)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (qb_ipcs_us_publish) info: server name: crmd
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) notice: The local CRM is operational
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_PENDING received in state S_STARTING from do_started
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_STARTING -> S_PENDING | input=I_PENDING cause=C_FSA_INTERNAL origin=do_started
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Election Trigger (I_DC_TIMEOUT) just popped (20000ms)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) warning: Input I_DC_TIMEOUT received in state S_PENDING from crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_PENDING -> S_ELECTION | input=I_DC_TIMEOUT cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (election_check) info: election-DC won by local node
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_ELECTION_DC received in state S_ELECTION from election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_ELECTION -> S_INTEGRATION | input=I_ELECTION_DC cause=C_FSA_INTERNAL origin=election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_te_control) info: Registering TE UUID: 305c37e8-0981-497b-a285-c430070e70ae
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (set_graph_functions) info: Setting custom graph functions
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_takeover) info: Taking over DC status for this partition
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_readwrite) info: We are now in R/W mode
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_master operation for section 'all': OK (rc=0, origin=local/crmd/5, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/6)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/6, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/8)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.0.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.1.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config: <cluster_property_set id="cib-bootstrap-options"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </cluster_property_set>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/8, version=0.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/10)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.1.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.2.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.1+20190304.9e909a5bd-1.4-2.0.1+20190304.9e909a5bd"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/10, version=0.2.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/12)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.2.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.3.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 4
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_all) info: join-1: Waiting on 1 outstanding join acks
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (update_dc) info: Set DC to 15sp1-1 (3.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: update_dc: Node 15sp1-1[1] - expected state is now member (was (null))
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/12, version=0.3.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/14)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.3.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=4
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="hacluster"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/14, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 1df7ee72464178ed9ef4d38760c5c496
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.4.0 with 0.4.0 from 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/19, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/21)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/22)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/23)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.1 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="1" uname="15sp1-1"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/21, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/22, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.1 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.2 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="1" uname="15sp1-1" in_ccm="true" crmd="online" crm-debug-origin="do_lrm_query_internal"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm id="1">
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </node_state>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/23, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=27)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition -1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/25)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/26)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/27)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/25, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.2 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.3 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/26, version=0.4.3)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.3 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.4 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4, @have-quorum=1, @dc-uuid=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/27, version=0.4.4)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-1.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.1.0 of the CIB to disk (digest: da12c1ea82516c83c42bbb6af78f7c43)
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.Q2Mefv (digest: /var/lib/pacemaker/cib/cib.lYgCoM)
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Resource start-up disabled since no STONITH resources have been defined
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Either configure some or disable STONITH with the stonith-enabled option
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status_fencing) info: Node 15sp1-1 is active
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 0, saving inputs in /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Configuration errors found during scheduler processing, please run "crm_verify -L" to identify issues
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 0 (ref=pe_calc-dc-1554260501-7) derived from /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 0 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-0.bz2): Complete
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-2.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.4.0 of the CIB to disk (digest: 41a7a4f3446765b9550c8eed97655f87)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.k86jdq (digest: /var/lib/pacemaker/cib/cib.2VIuZJ)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_apply_diff operation for section 'all' to all (origin=local/cibadmin/2)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.4 2
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.0 (null)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=5, @num_updates=0
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="placement-strategy" value="balanced" id="cib-bootstrap-options-placement-strategy"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <rsc_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="rsc-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="migration-threshold" value="3" id="rsc-options-migration-threshold"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </rsc_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <op_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="op-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="timeout" value="600" id="op-options-timeout"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </op_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_apply_diff operation for section 'all': OK (rc=0, origin=15sp1-1/cibadmin/2, version=0.5.0)
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 0 aborted by cib-bootstrap-options-stonith-enabled doing create stonith-enabled=false: Configuration change | cib=0.5.0 source=te_update_diff_v2:499 path=/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options'] complete=true
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-3.raw
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: 80483689fd341b672c06963bb25bdd6b)
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.ZE4jzT (digest: /var/lib/pacemaker/cib/cib.xCbzOh)
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_config) warning: Blind faith: not fencing unseen nodes
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 1, saving inputs in /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 1 (ref=pe_calc-dc-1554260503-8) derived from /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 1 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-1.bz2): Complete
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:47 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 8d4ba34bb3113e36afd6b6bf39fb69a0 for 0.5.0 (0x5604300bf500 0)
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (crm_procfs_pid_of) info: Found pacemaker-based active as process 1943
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) notice: High CPU load detected: 1.040000
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0100 (was ffffffff)
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) info: Moderate CPU load detected: 0.810000
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0010 (was 0100)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 1f91ec8f-7986-4c15-be46-302b53ff3193/0x558f48ea7bf0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry de9bd295-1272-4db2-bdc6-6b1906ae5553/0x55b9435a50e0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/34)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/35)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/34, version=0.5.0)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.0 2
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.1 (null)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="2" in_ccm="true" crmd="offline" crm-debug-origin="post_cache_update"/>
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/35, version=0.5.1)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 82f3fa83-e1aa-4b46-99aa-91c7dda4969a/0x5649958bed90 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry f747eb13-dfe9-4182-9b3e-00d9f416e88e/0x56042fb54140 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry f2f954cd-386a-4993-9142-8621ae195416/0x55995aef4080 for node (null)/2 (2 total)
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_HA_MESSAGE origin=route_message
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 8
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was (null))
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: e5b55e525a867a8154545eca60a3828b
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.1 with 0.5.1 from 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/37, version=0.5.1)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/38)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/39)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.1 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.2 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="2" uname="15sp1-2"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/38, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/39, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/40)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/41)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.2 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.3 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/40, version=0.5.3)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.3 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.4 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/41, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-4.raw
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: bd5d2bae72ccab0f8431984061bf46bf)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/42)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/43)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/42, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.4 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.5 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=5
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=do_lrm_query_internal, @uname=15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/43, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=48)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/46)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/47)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/48)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/46, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.5 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.6 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=6
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/47, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/48, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 2, saving inputs in /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 2 (ref=pe_calc-dc-1554260554-18) derived from /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 2 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-2.bz2): Complete
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.4bNgNm (digest: /var/lib/pacemaker/cib/cib.MRpEpc)
+Apr 03 11:02:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 44ae77e9ff79c954e9d39a4b11a48f55 for 0.5.6 (0x56042ffaf6e0 0)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: handle_request: Node 15sp1-2[2] - expected state is now down (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (handle_shutdown_request) info: Creating shutdown request for 15sp1-2 (state=S_IDLE)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting shutdown[15sp1-2]: (null) -> 1554260567 from 15sp1-1
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Sent CIB request 4 with 1 change for shutdown (id n/a, set n/a)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/attrd/4)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.6 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.7 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=7
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <transient_attributes id="2"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <instance_attributes id="status-2">
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="status-2-shutdown" name="shutdown" value="1554260567"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </instance_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </transient_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 2 aborted by transient_attributes.2 'create': Transient attribute change | cib=0.5.7 source=abort_unless_down:329 path=/cib/status/node_state[@id='2'] complete=true
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/attrd/4, version=0.5.7)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: CIB update 4 result for shutdown: OK | rc=0
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: * shutdown[15sp1-2]=1554260567
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is shutting down
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (sched_shutdown_op) notice: Scheduling shutdown of node 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (LogNodeActions) notice: * Shutdown 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 3, saving inputs in /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 3 (ref=pe_calc-dc-1554260567-19) derived from /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (te_crm_command) info: Executing crm-event (1) without waiting: do_shutdown on 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 3 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-3.bz2): Complete
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is no longer a peer | DC=true old=0x4000000 new=0x0000000
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting transient_attributes status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/transient_attributes
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes to all (origin=local/crmd/51)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: crmd_peer_down: Node 15sp1-2[2] - join-1 phase confirmed -> none
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: do_shutdown of peer 15sp1-2 is in progress | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/52)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.7 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.8 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/transient_attributes[@id='2']
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=8
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-1/crmd/51, version=0.5.8)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.8 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.9 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=9
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=offline, @crm-debug-origin=peer_update_callback, @join=down, @expected=down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/52, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_remove) notice: Removing all 15sp1-2 attributes for peer loss
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_shutdown_req) info: Peer 15sp1-2 is requesting to shut down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now lost (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) notice: do_shutdown of peer 15sp1-2 is complete | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/53)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/56)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/57)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/53, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/56, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.9 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.10 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=10
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=false, @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/57, version=0.5.10)
+Apr 03 11:02:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0001 (was 0010)
+Apr 03 11:02:52 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 721b7cebe807ad0faf4a6dc35780fe91 for 0.5.10 (0x560430039b10 0)
+Apr 03 11:03:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0000 (was 0001)
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member (was lost)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/58)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.10 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.11 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=11
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=peer_update_callback
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/58, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/61)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/62)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/61, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.11 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.12 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=12
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=true, @crm-debug-origin=post_cache_update
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/62, version=0.5.12)
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 joined
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 5c9c833c-faec-4e40-9451-1eca51fe31c1/0x5649958c6240 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 53c0909d-78ff-49b5-bf79-9ef7ceb014aa/0x56042ffb1100 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry cc7d88cd-ec11-4a95-9820-ec156175b0ca/0x55995aef85e0 for node (null)/2 (2 total)
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 (<unknown>) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.12)
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is now a peer | DC=true old=0x0000000 new=0x4000000
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (te_trigger_stonith_history_sync) info: Fence history will be synchronized cluster-wide within 5 seconds
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_FSA_INTERNAL origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: An unknown node joined - (re-)offer to any unconfirmed nodes
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 16
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Skipping 15sp1-1: already known 4
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/63)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.12 2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.13 (null)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=13
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/63, version=0.5.13)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.13)
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_offer_one: Node 15sp1-2[2] - join-1 phase welcomed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was down)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 9cc271d2c23b97671004273302f97501
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.13 with 0.5.13 from 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/65, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/66)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/67)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/68)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/69)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/66, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/67, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.13 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.14 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=14
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/68, version=0.5.14)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.14 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.15 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=15
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/69, version=0.5.15)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/70)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=76)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/71)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.15 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.16 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/lrm[@id='2']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=16
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/70, version=0.5.16)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.16 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.17 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=17
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/71, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/74)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/75)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/76)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/74, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.17 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.18 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=18
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/75, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/76, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 4, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 4 (ref=pe_calc-dc-1554260614-32) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 4 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-5.raw
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: ca3eacfa6d368fd79cf391411a7d16de)
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.F8yvtW (digest: /var/lib/pacemaker/cib/cib.iAVwFF)
+Apr 03 11:03:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 5d5b6ac1becdd43a5327925a8d1f5579 for 0.5.18 (0x56042ffb12f0 0)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 5, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 5 (ref=pe_calc-dc-1554261514-33) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 5 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 6, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 6 (ref=pe_calc-dc-1554262414-34) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 6 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 7, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 7 (ref=pe_calc-dc-1554263314-35) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 7 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 8, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 8 (ref=pe_calc-dc-1554264214-36) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 8 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 9, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 9 (ref=pe_calc-dc-1554265114-37) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 9 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 10, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 10 (ref=pe_calc-dc-1554266014-38) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 10 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 11, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 11 (ref=pe_calc-dc-1554266914-39) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 11 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 12, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 12 (ref=pe_calc-dc-1554267814-40) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 12 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 13, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 13 (ref=pe_calc-dc-1554268714-41) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 13 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: Current ping state: S_IDLE
diff --git a/test/unittests/pacemaker.log.2 b/test/unittests/pacemaker.log.2
new file mode 100644
index 0000000..bd189cc
--- /dev/null
+++ b/test/unittests/pacemaker.log.2
@@ -0,0 +1,3 @@
+Jan 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Jan 03 11:03:41 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Jan 03 11:03:51 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
diff --git a/test/unittests/pacemaker_unicode.log b/test/unittests/pacemaker_unicode.log
new file mode 100644
index 0000000..47aaa31
--- /dev/null
+++ b/test/unittests/pacemaker_unicode.log
@@ -0,0 +1,30 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+� ∀↱
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: ¶ an ⅓ 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: ⚽ configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: → Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615Ḽơᶉëᶆ ȋṕšᶙṁ ḍỡḽǭᵳ ʂǐť ӓṁệẗ, ĉṓɲṩḙċťᶒțûɾ ấɖḯƥĭṩčįɳġ ḝłįʈ, șếᶑ ᶁⱺ ẽḭŭŝḿꝋď ṫĕᶆᶈṓɍ ỉñḉīḑȋᵭṵńť ṷŧ ḹẩḇőꝛế éȶ đꝍꞎôꝛȇ ᵯáꞡᶇā ąⱡîɋṹẵ.
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: 你好 \xf0\x28\x8c\x28: pac我很特殊
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: κόσμε
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: a a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: \xc3\x28 a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: � d�ectory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-sche,̆dulerd[1947] (qb_ipcs_us_publish) info: 𐀀 name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: �����r-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: ������ directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync' ������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: � � � � � � � � � � � � � � � �
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: �����������������������������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster\xa0\xa1 configuration file /var/lib/pacemaker/cib/cib.xml (﷐﷑﷒﷓﷔﷕﷖﷗﷘﷙﷚﷛﷜﷝﷞﷟﷠﷡﷢﷣﷤﷥﷦﷧﷨﷩﷪﷫﷬﷭﷮﷯digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory \xF0\xA4\xAD (2)
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /v \xf0\x90\x8c\xb car/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0,åabc Fired=0, Skipped=0, \xf0\x28\x8c\xbc Incomplete=0, Source=/var/lib/pacemake \xf0\x90\x28\xbcr/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE fro\xf8\xa1\xa1\xa1\xa1m \xf0\x28\x8c\x28notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition \xfc\xa1\xa1\xa1\xa1\xa1S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: \xfc\xa1\xa1\xa1\xa1\xa1 test_unicode
diff --git a/test/unittests/schemas/acls-1.1.rng b/test/unittests/schemas/acls-1.1.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.1.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/acls-1.2.rng b/test/unittests/schemas/acls-1.2.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.2.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.0.rng b/test/unittests/schemas/constraints-1.0.rng
new file mode 100644
index 0000000..5a4474a
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.0.rng
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.1.rng b/test/unittests/schemas/constraints-1.1.rng
new file mode 100644
index 0000000..fff0fb7
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.1.rng
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="rsc-pattern"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ </choice>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="domain"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="node"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </group>
+ </choice>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="first-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="then-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.2.rng b/test/unittests/schemas/constraints-1.2.rng
new file mode 100644
index 0000000..221140c
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.2.rng
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/fencing.rng b/test/unittests/schemas/fencing.rng
new file mode 100644
index 0000000..87de5a8
--- /dev/null
+++ b/test/unittests/schemas/fencing.rng
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-stonith"/>
+ </start>
+
+ <define name="element-stonith">
+ <element name="fencing-topology">
+ <zeroOrMore>
+ <ref name="element-level"/>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-level">
+ <element name="fencing-level">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="target"><text/></attribute>
+ <attribute name="index"><data type="positiveInteger"/></attribute>
+ <attribute name="devices">
+ <data type="string">
+ <param name="pattern">([a-zA-Z0-9_\.\-]+)(,[a-zA-Z0-9_\.\-]+)*</param>
+ </data>
+ </attribute>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/nvset.rng b/test/unittests/schemas/nvset.rng
new file mode 100644
index 0000000..0d7e72c
--- /dev/null
+++ b/test/unittests/schemas/nvset.rng
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nvset"/>
+ </start>
+
+ <define name="element-nvset">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <interleave>
+ <optional>
+ <externalRef href="rule.rng"/>
+ </optional>
+ <zeroOrMore>
+ <element name="nvpair">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </element>
+ </zeroOrMore>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ </interleave>
+ </group>
+ </choice>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.0.rng b/test/unittests/schemas/pacemaker-1.0.rng
new file mode 100644
index 0000000..7100393
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.0.rng
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.0.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.0.rng"/>
+ </element>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.1.rng b/test/unittests/schemas/pacemaker-1.1.rng
new file mode 100644
index 0000000..50e9458
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.1.rng
@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.1.rng"/>
+ </element>
+ <optional>
+ <element name="domains">
+ <zeroOrMore>
+ <element name="domain">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="name"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <element name="constraints">
+ <externalRef href="constraints-1.1.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.1.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.2.rng b/test/unittests/schemas/pacemaker-1.2.rng
new file mode 100644
index 0000000..33a7d2d
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.2.rng
@@ -0,0 +1,146 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.2.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.2.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.2.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.0.rng b/test/unittests/schemas/resources-1.0.rng
new file mode 100644
index 0000000..7ea2228
--- /dev/null
+++ b/test/unittests/schemas/resources-1.0.rng
@@ -0,0 +1,177 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.1.rng b/test/unittests/schemas/resources-1.1.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.1.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.2.rng b/test/unittests/schemas/resources-1.2.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.2.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/rule.rng b/test/unittests/schemas/rule.rng
new file mode 100644
index 0000000..242eff8
--- /dev/null
+++ b/test/unittests/schemas/rule.rng
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-rule"/>
+ </start>
+
+ <define name="element-rule">
+ <element name="rule">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="boolean-op">
+ <choice>
+ <value>or</value>
+ <value>and</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role"><text/></attribute>
+ </optional>
+ <oneOrMore>
+ <choice>
+ <element name="expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="attribute"><text/></attribute>
+ <attribute name="operation">
+ <choice>
+ <value>lt</value>
+ <value>gt</value>
+ <value>lte</value>
+ <value>gte</value>
+ <value>eq</value>
+ <value>ne</value>
+ <value>defined</value>
+ <value>not_defined</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type" ann:defaultValue="string">
+ <choice>
+ <value>string</value>
+ <value>number</value>
+ <value>version</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ <element name="date_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="operation"><value>in_range</value></attribute>
+ <choice>
+ <group>
+ <optional>
+ <attribute name="start"><text/></attribute>
+ </optional>
+ <attribute name="end"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="start"><text/></attribute>
+ <element name="duration">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>gt</value></attribute>
+ <attribute name="start"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="operation"><value>lt</value></attribute>
+ <choice>
+ <attribute name="end"><text/></attribute>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>date_spec</value></attribute>
+ <element name="date_spec">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </element>
+ <ref name="element-rule"/>
+ </choice>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="date-common">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="hours"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="monthdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="yearsdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="months"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weeks"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="years"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekyears"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="moon"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/score.rng b/test/unittests/schemas/score.rng
new file mode 100644
index 0000000..57b10f2
--- /dev/null
+++ b/test/unittests/schemas/score.rng
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-score"/>
+ </start>
+
+ <define name="attribute-score">
+ <attribute name="score">
+ <choice>
+ <data type="integer"/>
+ <value>INFINITY</value>
+ <value>+INFINITY</value>
+ <value>-INFINITY</value>
+ </choice>
+ </attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/schemas/versions.rng b/test/unittests/schemas/versions.rng
new file mode 100644
index 0000000..ab4e4ea
--- /dev/null
+++ b/test/unittests/schemas/versions.rng
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-version"/>
+ </start>
+
+ <define name="attribute-version">
+ <attribute name="validate-with">
+ <choice>
+ <value>none</value>
+ <value>pacemaker-0.6</value>
+ <value>transitional-0.6</value>
+ <value>pacemaker-0.7</value>
+ <value>pacemaker-1.0</value>
+ <value>pacemaker-1.1</value>
+ <value>pacemaker-1.2</value>
+ </choice>
+ </attribute>
+ <attribute name="admin_epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="num_updates"><data type="nonNegativeInteger"/></attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/scripts/inc1/main.yml b/test/unittests/scripts/inc1/main.yml
new file mode 100644
index 0000000..8c290d3
--- /dev/null
+++ b/test/unittests/scripts/inc1/main.yml
@@ -0,0 +1,22 @@
+version: 2.2
+shortdesc: Include test script 1
+longdesc: Test if includes work ok
+parameters:
+ - name: foo
+ type: boolean
+ shortdesc: An optional feature
+ - name: bar
+ type: string
+ shortdesc: A string of characters
+ value: the name is the game
+ - name: is-required
+ type: int
+ required: true
+actions:
+ - call: ls /tmp
+ when: foo
+ shortdesc: ls
+ - call: "echo '{{foo}}'"
+ shortdesc: foo
+ - call: "echo '{{bar}}'"
+ shortdesc: bar
diff --git a/test/unittests/scripts/inc2/main.yml b/test/unittests/scripts/inc2/main.yml
new file mode 100644
index 0000000..4910696
--- /dev/null
+++ b/test/unittests/scripts/inc2/main.yml
@@ -0,0 +1,26 @@
+---
+- version: 2.2
+ shortdesc: Includes another script
+ longdesc: This one includes another script
+ parameters:
+ - name: wiz
+ type: string
+ - name: foo
+ type: boolean
+ shortdesc: A different foo
+ include:
+ - script: inc1
+ name: included-script
+ parameters:
+ - name: is-required
+ value: 33
+ actions:
+ - call: "echo 'before {{wiz}}'"
+ shortdesc: before wiz
+ - include: included-script
+ - call: "echo 'after {{foo}}'"
+ shortdesc: after foo
+ - cib: |
+ {{included-script:is-required}}
+ - cib: |
+ {{wiz}}
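
inc1 and inc2 together exercise parameter substitution across script includes: a plain {{name}} reference resolves against the current script, while a {{include-name:param}} reference such as {{included-script:is-required}} reaches into the parameters of an included script. A toy renderer of just that reference syntax, useful when reading these fixtures (a simplified sketch that assumes a flat name-to-value mapping; the real resolution in crmsh's scripts module is considerably more involved):

    import re

    def render(text: str, values: dict) -> str:
        # Substitute {{name}} and {{include:name}} references from a
        # flat dict, leaving unknown references untouched.
        return re.sub(
            r"\{\{([\w-]+(?::[\w-]+)?)\}\}",
            lambda m: str(values.get(m.group(1), m.group(0))),
            text)

    print(render("echo 'before {{wiz}}'", {"wiz": "bang"}))
    # echo 'before bang'
    print(render("{{included-script:is-required}}",
                 {"included-script:is-required": 33}))
    # 33
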
diff --git a/test/unittests/scripts/legacy/main.yml b/test/unittests/scripts/legacy/main.yml
new file mode 100644
index 0000000..ef5d35b
--- /dev/null
+++ b/test/unittests/scripts/legacy/main.yml
@@ -0,0 +1,52 @@
+---
+- name: Initialize a new cluster
+ description: >
+ Initializes a new cluster on the nodes provided. Will try to
+ configure SSH if not already configured, and install missing
+ packages.
+
+ A more user-friendly interface to this script is provided by the
+ cluster init command.
+ parameters:
+ - name: iface
+ description: "Use the given interface. Try to auto-detect interface by default."
+ default: ""
+
+ - name: transport
+ description: "Corosync transport (mcast or udpu)"
+ default: "udpu"
+
+ - name: bindnetaddr
+ description: "Network address to bind to (e.g.: 192.168.1.0)"
+ default: ""
+
+ - name: mcastaddr
+ description: "Multicast address (e.g.: 239.x.x.x)"
+ default: ""
+
+ - name: mcastport
+ description: "Multicast port"
+ default: 5405
+
+ steps:
+ - name: Configure SSH
+ apply_local: configure.py ssh
+
+ - name: Check state of nodes
+ collect: collect.py
+
+ - name: Verify parameters
+ validate: verify.py
+
+ - name: Install packages
+ apply: configure.py install
+
+ - name: Generate corosync authkey
+ apply_local: authkey.py
+
+ - name: Configure cluster nodes
+ apply: configure.py corosync
+
+ - name: Initialize cluster
+ apply_local: init.py
+
diff --git a/test/unittests/scripts/templates/apache.xml b/test/unittests/scripts/templates/apache.xml
new file mode 100644
index 0000000..faf3ef0
--- /dev/null
+++ b/test/unittests/scripts/templates/apache.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<template name="apache">
+
+<shortdesc lang="en">Apache Web Server</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type apache.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this Apache resource in the cluster.
+</longdesc>
+<content type="string" default="apache"/>
+</parameter>
+
+<parameter name="configfile" required="1">
+<shortdesc lang="en">Apache config file</shortdesc>
+<longdesc lang="en">
+Full pathname of the Apache configuration file.
+</longdesc>
+<content type="string" default="/etc/apache2/httpd.conf"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:apache
+ params
+ configfile="<insert param="configfile"/>"
+ op start timeout="40" op stop timeout="60"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
diff --git a/test/unittests/scripts/templates/virtual-ip.xml b/test/unittests/scripts/templates/virtual-ip.xml
new file mode 100644
index 0000000..22ab5bf
--- /dev/null
+++ b/test/unittests/scripts/templates/virtual-ip.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<template name="virtual-ip">
+
+<shortdesc lang="en">Virtual IP Address</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type IPaddr2.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this virtual IP address resource in the cluster.
+</longdesc>
+<content type="string" default="virtual-ip"/>
+</parameter>
+
+<parameter name="ip" required="1">
+<shortdesc lang="en">IP address</shortdesc>
+<longdesc lang="en">
+The IPv4 address to be configured in dotted quad notation,
+for example "192.168.1.1".
+</longdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="netmask">
+<shortdesc lang="en">Netmask</shortdesc>
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0).
+
+If unspecified, it will be determined automatically.
+</longdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="lvs_support">
+<shortdesc lang="en">LVS support</shortdesc>
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. In case an IP
+address is stopped, it is moved only to the loopback device, allowing
+the local node to continue servicing requests while no longer
+advertising the address on the network.
+</longdesc>
+<content type="boolean"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:IPaddr2
+ params
+ ip="<insert param="ip"/>"
+ <if set="netmask">cidr_netmask="<insert param="netmask"/>"</if>
+ <if set="lvs_support">lvs_support="<insert param="lvs_support"/>"</if>
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
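
Both templates emit a crm configure snippet by expanding <insert param="..."/> placeholders, with <if set="...">...</if> guarding optional parts such as cidr_netmask and lvs_support. A rough Python equivalent of that expansion, to show the intended output (an illustrative sketch, not the template engine crmsh ships):

    import re

    def expand(template: str, params: dict) -> str:
        # Drop <if set="x">...</if> spans whose parameter is unset,
        # then substitute every remaining <insert param="x"/>.
        text = re.sub(r'<if set="(\w+)">(.*?)</if>',
                      lambda m: m.group(2) if params.get(m.group(1)) else "",
                      template, flags=re.S)
        return re.sub(r'<insert param="(\w+)"/>',
                      lambda m: str(params[m.group(1)]), text)

    tmpl = ('primitive <insert param="id"/> ocf:heartbeat:IPaddr2'
            ' params ip="<insert param="ip"/>"'
            '<if set="netmask"> cidr_netmask="<insert param="netmask"/>"</if>')
    print(expand(tmpl, {"id": "vip1", "ip": "192.168.1.1"}))
    # primitive vip1 ocf:heartbeat:IPaddr2 params ip="192.168.1.1"
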
diff --git a/test/unittests/scripts/unified/main.yml b/test/unittests/scripts/unified/main.yml
new file mode 100644
index 0000000..29f5d07
--- /dev/null
+++ b/test/unittests/scripts/unified/main.yml
@@ -0,0 +1,26 @@
+version: 2.2
+shortdesc: Unified Script
+longdesc: >
+ Test if we can define multiple steps in a single script
+category: test
+steps:
+ - parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: Identifier
+ - name: vip
+ shortdesc: Configure the virtual IP
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: IP Identifier
+ - name: ip
+ type: ip_address
+ required: true
+ shortdesc: The IP Address
+actions:
+ - cib: |
+ primitive {{vip:id}} IPaddr2 ip={{vip:ip}}
+ group g-{{id}} {{id}} {{vip:id}}
diff --git a/test/unittests/scripts/v2/main.yml b/test/unittests/scripts/v2/main.yml
new file mode 100644
index 0000000..41822a2
--- /dev/null
+++ b/test/unittests/scripts/v2/main.yml
@@ -0,0 +1,46 @@
+---
+- version: 2.2
+ shortdesc: Apache Webserver
+ longdesc: >
+ Configure a resource group containing a virtual IP address and
+ an instance of the Apache web server.
+ category: Server
+ parameters:
+ - name: id
+ shortdesc: The ID specified here is for the web server resource group.
+ - name: install
+ type: boolean
+ value: true
+ shortdesc: Disable if no installation should be performed
+ include:
+ - agent: test:apache
+ parameters:
+ - name: id
+ value: "{{id}}-server"
+ - name: configfile
+ type: file
+ ops: |
+ op monitor interval=20s timeout=20s
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ value: "{{id}}-ip"
+ - name: ip
+ type: ip_address
+ ops: |
+ op monitor interval=20s timeout=20s
+ actions:
+ - install:
+ - apache2
+ when: install
+ - call: a2enable mod_status
+ shortdesc: Enable status module
+ nodes: all
+ when: install
+ - cib: |
+ {{virtual-ip}}
+ {{apache}}
+ group {{id}}
+ {{virtual-ip:id}}
+ {{apache:id}}
diff --git a/test/unittests/scripts/vip/main.yml b/test/unittests/scripts/vip/main.yml
new file mode 100644
index 0000000..4f3bde1
--- /dev/null
+++ b/test/unittests/scripts/vip/main.yml
@@ -0,0 +1,28 @@
+---
+- version: 2.2
+ shortdesc: Virtual IP
+ category: Basic
+ include:
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: ip
+ type: ip_address
+ required: true
+ - name: cidr_netmask
+ type: integer
+ required: false
+ - name: broadcast
+      type: ip_address
+ required: false
+ - name: lvs_support
+ required: false
+ type: boolean
+ ops: |
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+ actions:
+ - include: virtual-ip
diff --git a/test/unittests/scripts/vipinc/main.yml b/test/unittests/scripts/vipinc/main.yml
new file mode 100644
index 0000000..6741885
--- /dev/null
+++ b/test/unittests/scripts/vipinc/main.yml
@@ -0,0 +1,14 @@
+version: 2.2
+category: Test
+shortdesc: Test script include
+include:
+ - script: vip
+ parameters:
+ - name: id
+ value: vip1
+ - name: ip
+ value: 192.168.200.100
+actions:
+ - include: vip
+ - cib: |
+ clone c-{{vip:id}} {{vip:id}}
diff --git a/test/unittests/scripts/workflows/10-webserver.xml b/test/unittests/scripts/workflows/10-webserver.xml
new file mode 100644
index 0000000..f18d55a
--- /dev/null
+++ b/test/unittests/scripts/workflows/10-webserver.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<workflow name="10-webserver">
+
+<shortdesc lang="en">Web Server</shortdesc>
+<longdesc lang="en">
+Configure a resource group containing a virtual IP address and
+an instance of the Apache web server. You may wish to use this
+in conjunction with a filesystem resource; in this case you will
+need to separately configure the filesystem then add colocation
+and ordering constraints to have it start before the resource
+group you create here.
+</longdesc>
+
+<parameters>
+<stepdesc lang="en">
+The ID specified here is for the web server resource group.
+</stepdesc>
+<parameter name="id" required="1">
+<shortdesc lang="en">Group ID</shortdesc>
+<longdesc lang="en">
+Unique ID for the web server resource group in the cluster.
+</longdesc>
+<content type="string" default="web-server"/>
+</parameter>
+</parameters>
+
+<templates>
+<template name="virtual-ip" required="1">
+<stepdesc lang="en">
+The IP address configured here will start before the Apache instance.
+</stepdesc>
+</template>
+<template name="apache" required="1">
+<stepdesc lang="en">
+The Apache configuration file specified here must be available via the
+same path on all cluster nodes, and Apache must be configured with
+mod_status enabled. If in doubt, try running Apache manually via
+its init script first, and ensure http://localhost:80/server-status is
+accessible.
+</stepdesc>
+</template>
+</templates>
+
+<crm_script>
+group <insert param="id"/>
+ <insert param="id" from_template="virtual-ip"/>
+ <insert param="id" from_template="apache"/>
+</crm_script>
+
+</workflow>
diff --git a/test/unittests/test.conf b/test/unittests/test.conf
new file mode 100644
index 0000000..fe75686
--- /dev/null
+++ b/test/unittests/test.conf
@@ -0,0 +1,12 @@
+[path]
+sharedir = ../../doc
+cache = ../../doc
+crm_config = .
+crm_daemon_dir = .
+crm_daemon_user = hacluster
+ocf_root = .
+crm_dtd_dir = .
+pe_state_dir = .
+heartbeat_dir = .
+hb_delnode = ./hb_delnode
+nagios_plugins = .
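
The test module that follows stacks several mock.patch decorators on almost every test. Decorators apply bottom-up, so the patch written closest to the function supplies the first mock argument; in test_start_pacemaker below, package_is_installed is the last decorator listed and arrives as mock_installed, the first parameter. A minimal sketch of that ordering:

    from unittest import mock

    @mock.patch('os.path.exists')  # outermost patch -> last argument
    @mock.patch('os.getcwd')       # innermost patch -> first argument
    def check(mock_getcwd, mock_exists):
        mock_getcwd.return_value = '/tmp'
        mock_exists.return_value = True
        assert mock_getcwd() == '/tmp'
        assert mock_exists('/tmp')

    check()
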
diff --git a/test/unittests/test_bootstrap.py b/test/unittests/test_bootstrap.py
new file mode 100644
index 0000000..45bf03d
--- /dev/null
+++ b/test/unittests/test_bootstrap.py
@@ -0,0 +1,1905 @@
+"""
+Unit tests for crmsh/bootstrap.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2019-10-21
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import subprocess
+import unittest
+import yaml
+import socket
+
+import crmsh.sh
+import crmsh.ssh_key
+import crmsh.user_of_host
+import crmsh.utils
+from crmsh.ui_node import NodeMgmt
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import constants
+from crmsh import qdevice
+
+
+class TestContext(unittest.TestCase):
+ """
+    Unit tests for crmsh.bootstrap.Context
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ctx_inst = bootstrap.Context()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.bootstrap.Context.initialize_user')
+ def test_set_context(self, mock_initialize_user: mock.MagicMock):
+ options = mock.Mock(yes_to_all=True, ipv6=False)
+ ctx = self.ctx_inst.set_context(options)
+ self.assertEqual(ctx.yes_to_all, True)
+ self.assertEqual(ctx.ipv6, False)
+ mock_initialize_user.assert_called_once()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_return(self, mock_qdevice):
+ self.ctx_inst.initialize_qdevice()
+ mock_qdevice.assert_not_called()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user=None, algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_with_user(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "alice@node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user='alice', algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_together(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.sbd_devices = ["/dev/sda1"]
+ ctx.diskless_sbd = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't use -s and -S options together")
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_sbd_stage_no_option(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.yes_to_all = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Stage sbd should specify sbd device by -s or diskless sbd by -S option")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage_service(self, mock_active, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ mock_active.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't configure stage sbd: sbd.service already running! Please use crm option '-F' if need to redeploy")
+ mock_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage(self, mock_active, mock_check_all):
+ options = mock.Mock(stage="sbd", diskless_sbd=True, cluster_is_running=True)
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ ctx.cluster_is_running = True
+ mock_active.return_value = False
+ ctx._validate_sbd_option()
+ mock_active.assert_called_once_with("sbd.service")
+ mock_check_all.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_option_error_nic_number(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.nic_list = ["eth1", "eth2", "eth3"]
+ with self.assertRaises(SystemExit):
+ ctx.validate_option()
+ mock_error.assert_called_once_with("Maximum number of interface is 2")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ def test_validate_cluster_node_same_name(self, mock_ip_in_local, mock_gethost, mock_fatal):
+ options = mock.Mock(cluster_node="me", type="join")
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "me"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
+ mock_gethost.return_value = ("10.10.10.41", None)
+ mock_ip_in_local.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with("Please specify peer node's hostname or IP address")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ def test_validate_cluster_node_unknown_name(self, mock_gethost, mock_fatal):
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "xxxx"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
+ mock_gethost.side_effect = socket.gaierror("gethostbyname error")
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with('"xxxx": gethostbyname error')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.Validation.valid_admin_ip')
+ def test_validate_option(self, mock_admin_ip, mock_warn):
+ ctx = crmsh.bootstrap.Context()
+ ctx.admin_ip = "10.10.10.123"
+ ctx.qdevice_inst = mock.Mock()
+ ctx._validate_sbd_option = mock.Mock()
+ ctx._validate_nodes_option = mock.Mock()
+ ctx.validate_option()
+ mock_admin_ip.assert_called_once_with("10.10.10.123")
+ ctx.qdevice_inst.valid_qdevice_options.assert_called_once_with()
+ ctx._validate_sbd_option.assert_called_once_with()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_return(self, mock_status):
+ res = self.ctx_inst.load_specific_profile(None)
+ assert res == {}
+ mock_status.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_not_exist(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("newname")
+ assert res == {}
+ mock_status.assert_called_once_with("\"newname\" profile does not exist in {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("name")
+ assert res == "test"
+ mock_status.assert_called_once_with("Loading \"name\" profile from {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform_s390(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="s390")
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, bootstrap.Context.S390_PROFILE_NAME)
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_not_called()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="xxx")
+ mock_cloud.return_value = "azure"
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, "azure")
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_called_once_with()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_not_exist(self, mock_platform, mock_exists):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = False
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_empty(self, mock_platform, mock_exists, mock_open_file, mock_load):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = ""
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+
+ @mock.patch('crmsh.bootstrap.Context.load_specific_profile')
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file(self, mock_platform, mock_exists, mock_open_file, mock_load, mock_load_specific):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = "data"
+ mock_load_specific.side_effect = [
+ {"name": "xin", "age": 18},
+ {"name": "wang"}
+ ]
+
+ self.ctx_inst.load_profiles()
+ assert self.ctx_inst.profiles_dict == {"name": "wang", "age": 18}
+
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+ mock_load_specific.assert_has_calls([
+ mock.call(bootstrap.Context.DEFAULT_PROFILE_NAME),
+ mock.call("s390")
+ ])
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'alice'
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+
+class TestBootstrap(unittest.TestCase):
+ """
+    Unit tests for crmsh/bootstrap.py
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.parallax.parallax_call')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.sbd.SBDTimeout.is_sbd_delay_start')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_start_pacemaker(self, mock_installed, mock_enabled, mock_delay_start, mock_start, mock_parallax_call):
+ bootstrap._context = None
+ mock_installed.return_value = True
+ mock_enabled.return_value = True
+ mock_delay_start.return_value = True
+ node_list = ["node1", "node2", "node3", "node4", "node5", "node6"]
+ bootstrap.start_pacemaker(node_list)
+ mock_start.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("corosync.service", remote_addr="node2"),
+ mock.call("corosync.service", remote_addr="node3"),
+ mock.call("corosync.service", remote_addr="node4"),
+ mock.call("corosync.service", remote_addr="node5"),
+ mock.call("corosync.service", remote_addr="node6"),
+ mock.call("pacemaker.service", enable=False, node_list=node_list)
+ ])
+ mock_parallax_call.assert_has_calls([
+ mock.call(node_list, 'mkdir -p /run/systemd/system/sbd.service.d/'),
+ mock.call(node_list, "echo -e '[Service]\nUnsetEnvironment=SBD_DELAY_START' > /run/systemd/system/sbd.service.d/sbd_delay_start_disabled.conf"),
+ mock.call(node_list, "systemctl daemon-reload"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_init_ssh(self, mock_start_service, mock_config_ssh):
+ bootstrap._context = mock.Mock(current_user="alice", user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.init_ssh()
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("alice")
+ ])
+
+ @mock.patch('crmsh.userdir.gethomedir')
+ def test_key_files(self, mock_gethome):
+ mock_gethome.return_value = "/root"
+ expected_res = {"private": "/root/.ssh/id_rsa", "public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
+ self.assertEqual(bootstrap.key_files("root"), expected_res)
+ mock_gethome.assert_called_once_with("root")
+
+ @mock.patch('builtins.open')
+ def test_is_nologin(self, mock_open_file):
+ data = "hacluster:x:90:90:heartbeat processes:/var/lib/heartbeat/cores/hacluster:/sbin/nologin"
+ mock_open_file.return_value = mock.mock_open(read_data=data).return_value
+ assert bootstrap.is_nologin("hacluster") is not None
+ mock_open_file.assert_called_once_with("/etc/passwd")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell_return(self, mock_nologin, mock_status, mock_confirm):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_nologin.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_confirm.assert_called_once_with("Continue?")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell(self, mock_nologin, mock_invoke):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ mock_nologin.return_value = True
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_invoke.assert_called_once_with("usermod -s /bin/bash hacluster", None)
+
+ @mock.patch('crmsh.sh.LocalShell.su_subprocess_run')
+ def test_generate_ssh_key_pair_on_remote(self, mock_su: mock.MagicMock):
+ mock_su.return_value = mock.Mock(returncode=0, stdout=b'')
+ bootstrap.generate_ssh_key_pair_on_remote('local_sudoer', 'remote_host', 'remote_sudoer', 'remote_user')
+ mock_su.assert_has_calls([
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='''
+[ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C "Cluster internal on $(hostname)" -N ''
+[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub
+'''.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ ),
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='cat ~/.ssh/id_rsa.pub'.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ ),
+ ])
+
+ @mock.patch('crmsh.bootstrap.append_unique')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.detect_file')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ def _test_configure_ssh_key(self, mock_change_shell, mock_key_files, mock_detect, mock_su, mock_append_unique):
+ mock_key_files.return_value = {"private": "/test/.ssh/id_rsa", "public": "/test/.ssh/id_rsa.pub", "authorized": "/test/.ssh/authorized_keys"}
+ mock_detect.side_effect = [True, True, False]
+
+ bootstrap.configure_ssh_key("test")
+
+ mock_change_shell.assert_called_once_with("test")
+ mock_key_files.assert_called_once_with("test")
+ mock_detect.assert_has_calls([
+ mock.call("/test/.ssh/id_rsa"),
+ mock.call("/test/.ssh/id_rsa.pub"),
+ mock.call("/test/.ssh/authorized_keys")
+ ])
+ mock_append_unique.assert_called_once_with("/test/.ssh/id_rsa.pub", "/test/.ssh/authorized_keys", "test")
+ mock_su.assert_called_once_with('test', 'touch /test/.ssh/authorized_keys')
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ def test_configure_ssh_key(self, mock_ensure_key_pair, mock_add):
+ public_key = crmsh.ssh_key.InMemoryPublicKey('foo')
+ mock_ensure_key_pair.return_value = (True, [public_key])
+ bootstrap.configure_ssh_key('alice')
+ mock_ensure_key_pair.assert_called_once_with(None, 'alice')
+ mock_add.assert_called_once_with(None, 'alice', public_key)
+
+ @mock.patch('crmsh.bootstrap.append_to_remote_file')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique_remote(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile", user="root", remote="node1", from_local=True)
+ mock_check.assert_called_once_with("fromfile", "tofile", remote="node1", source_local=True)
+ mock_append.assert_called_once_with("fromfile", "root", "node1", "tofile")
+
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile")
+ mock_check.assert_called_once_with("fromfile", "tofile", remote=None, source_local=False)
+ mock_append.assert_called_once_with("fromfile", "tofile", remote=None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_append_to_remote_file(self, mock_run):
+ bootstrap.append_to_remote_file("fromfile", "root", "node1", "tofile")
+ cmd = "cat fromfile | ssh {} root@node1 'cat >> tofile'".format(constants.SSH_OPTION)
+ mock_run.assert_called_once_with(cmd)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_join_ssh_no_seed_host(self, mock_error):
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh(None, None)
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key_for_secondary_user')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh(
+ self,
+ mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change, mock_swap_2,
+ ):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
+ mock_ssh_copy_id.return_value = 0
+
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ mock.call("hacluster"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_called_once_with("node1", "bob", "alice", "bob", "alice", add=True)
+ mock_invoke.assert_called_once_with(
+ "bob",
+ "ssh {} alice@node1 sudo crm cluster init -i eth1 ssh_remote".format(constants.SSH_OPTION),
+ )
+ mock_swap_2.assert_called_once()
+ args, kwargs = mock_swap_2.call_args
+ self.assertEqual(3, len(args))
+ self.assertEqual('node1', args[1])
+ self.assertEqual('hacluster', args[2])
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFile.public_key')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.list_public_key_for_user')
+ @mock.patch('logging.Logger.info')
+ def test_swap_public_ssh_key_for_secondary_user(
+ self,
+ mock_log_info,
+ mock_list_public_key_for_user,
+ mock_ensure_key_pair_exists_for_user,
+ mock_public_key,
+ mock_authorized_key_manager_add,
+ ):
+ mock_shell = mock.Mock(
+ crmsh.sh.ClusterShell,
+ local_shell=mock.Mock(crmsh.sh.LocalShell),
+ user_of_host=mock.Mock(crmsh.user_of_host.UserOfHost),
+ )
+ mock_list_public_key_for_user.return_value = ['~/.ssh/id_rsa', '~/.ssh/id_ed25519']
+ mock_ensure_key_pair_exists_for_user.return_value = (True, [
+ crmsh.ssh_key.InMemoryPublicKey('foo'),
+ crmsh.ssh_key.InMemoryPublicKey('bar'),
+ ])
+ mock_public_key.return_value = 'public_key'
+ crmsh.bootstrap.swap_public_ssh_key_for_secondary_user(mock_shell, 'node1', 'alice')
+ mock_list_public_key_for_user.assert_called_once_with(None, 'alice')
+ mock_ensure_key_pair_exists_for_user.assert_called_once_with('node1', 'alice')
+ mock_authorized_key_manager_add.assert_has_calls([
+ mock.call(None, 'alice', crmsh.ssh_key.InMemoryPublicKey('foo')),
+ mock.call('node1', 'alice', crmsh.ssh_key.KeyFile('~/.ssh/id_rsa')),
+ ])
+ mock_log_info.assert_called_with("A new ssh keypair is generated for user %s@%s.", 'alice', 'node1')
+
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh_bad_credential(self, mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
+ mock_ssh_copy_id.return_value = 255
+
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_not_called()
+ mock_invoke.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key_exception(self, mock_check_passwd, mock_warn, mock_export_ssh_key, mock_import_ssh):
+ mock_check_passwd.return_value = False
+ mock_import_ssh.side_effect = ValueError("Can't get the remote id_rsa.pub from {}: {}")
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+ mock_warn.assert_called_once_with(mock_import_ssh.side_effect)
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key(self, mock_check_passwd, mock_export_ssh, mock_import_ssh):
+ mock_check_passwd.return_value = True
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_export_ssh.assert_called_once_with("bob", "bob", "node1", "alice", "alice")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add_return(self, mock_this_node):
+ ctx = mock.Mock(user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.bootstrap_add(ctx)
+ mock_this_node.assert_not_called()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add(self, mock_this_node, mock_info, mock_run):
+ ctx = mock.Mock(current_user="alice", user_at_node_list=["bob@node2", "carol@node3"], nic_list=["eth1"], use_ssh_agent=False)
+ mock_this_node.return_value = "node1"
+ bootstrap.bootstrap_add(ctx)
+ mock_info.assert_has_calls([
+ mock.call("Adding node node2 to cluster"),
+ mock.call("Running command on node2: crm cluster join -y -i eth1 -c alice@node1"),
+ mock.call("Adding node node3 to cluster"),
+ mock.call("Running command on node3: crm cluster join -y -i eth1 -c alice@node1")
+ ])
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_nodelist(self, mock_run, mock_error):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_run.return_value = (1, None, None)
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_called_once_with('node1', 'crm_node -l')
+ mock_error.assert_called_once_with("Can't fetch cluster nodes list from node1: None")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_hostname(
+ self,
+ mock_run,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_error,
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ out_node_list = """1 node1 member
+ 2 node2 member"""
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (1, None, None)
+ ]
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_error.assert_called_once_with("Can't fetch hostname of node1: None")
+
+ @mock.patch('crmsh.bootstrap.swap_key_for_hacluster')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.utils.ssh_copy_id')
+ @mock.patch('crmsh.utils.user_of')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes(
+ self,
+ mock_run,
+ mock_swap,
+ mock_userof,
+ mock_ssh_copy_id: mock.MagicMock,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_change_shell,
+ mock_swap_hacluster
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_userof.return_value = "bob"
+ out_node_list = """1 node1 member
+ 2 node2 member"""
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (0, "node1", None)
+ ]
+
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_userof.assert_called_once_with("node2")
+ mock_ssh_copy_id.assert_has_calls([
+ mock.call('carol', 'bob', 'node2')
+ ])
+ mock_swap.assert_has_calls([
+ mock.call('node2', "carol", "bob", "carol", "bob"),
+ mock.call('node2', 'hacluster', 'hacluster', 'carol', 'bob', add=True)
+ ])
+
+ @mock.patch('crmsh.userdir.getuser')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('os.path.join')
+ @mock.patch('os.path.exists')
+ def test_init_ssh_remote_no_sshkey(self, mock_exists, mock_join, mock_append, mock_open_file, mock_key_files, mock_getuser):
+ mock_getuser.return_value = "alice"
+ mock_key_files.return_value = {"private": "/home/alice/.ssh/id_rsa", "public": "/home/alice/.ssh/id_rsa.pub", "authorized": "/home/alice/.ssh/authorized_keys"}
+ mock_exists.side_effect = [False, True, False, False, False]
+ mock_join.side_effect = ["/home/alice/.ssh/id_rsa",
+ "/home/alice/.ssh/id_dsa",
+ "/home/alice/.ssh/id_ecdsa",
+ "/home/alice/.ssh/id_ed25519"]
+ mock_open_file.side_effect = [
+ mock.mock_open().return_value,
+ mock.mock_open(read_data="data1 data2").return_value,
+ mock.mock_open(read_data="data1111").return_value
+ ]
+
+ bootstrap.init_ssh_remote()
+
+ mock_getuser.assert_called_once_with()
+ mock_key_files.assert_called_once_with("alice")
+
+ mock_open_file.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys", 'w'),
+ mock.call("/home/alice/.ssh/authorized_keys", "r+"),
+ mock.call("/home/alice/.ssh/id_rsa.pub")
+ ])
+ mock_exists.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys"),
+ mock.call("/home/alice/.ssh/id_rsa"),
+ mock.call("/home/alice/.ssh/id_dsa"),
+ mock.call("/home/alice/.ssh/id_ecdsa"),
+ mock.call("/home/alice/.ssh/id_ed25519"),
+ ])
+ mock_append.assert_called_once_with("/home/alice/.ssh/id_rsa.pub", "/home/alice/.ssh/authorized_keys")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname(self, mock_run):
+ mock_run.return_value = (0, "Node1", None)
+
+ peer_node = bootstrap.get_node_canonical_hostname('node1')
+ self.assertEqual('Node1', peer_node)
+ mock_run.assert_called_once_with('node1', 'crm_node --name')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.get_node_canonical_hostname('node1')
+
+ mock_run.assert_called_once_with("node1", "crm_node --name")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_local_offline(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node='node2')
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = False
+
+ assert bootstrap.is_online() is False
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_on_init_node(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node=None)
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = True
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_peer_offline(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node1')
+ mock_is_online.side_effect = [True, False]
+ bootstrap.COROSYNC_CONF_ORIG = "/tmp/crmsh_tmpfile"
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node1"
+ mock_corosync_conf.side_effect = [ "/etc/corosync/corosync.conf",
+ "/etc/corosync/corosync.conf"]
+
+ bootstrap.is_online()
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node1')
+ mock_corosync_conf.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_copy.assert_called_once_with(bootstrap.COROSYNC_CONF_ORIG, "/etc/corosync/corosync.conf")
+ mock_csync2.assert_called_once_with("/etc/corosync/corosync.conf")
+ mock_stop_service.assert_called_once_with("corosync")
+ mock_error.assert_called_once_with("Cannot see peer node \"node1\", please check the communication IP")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_both_online(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node2')
+ mock_is_online.side_effect = [True, True]
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node2"
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node2')
+ mock_corosync_conf.assert_not_called()
+ mock_copy.assert_not_called()
+ mock_csync2.assert_not_called()
+ mock_stop_service.assert_not_called()
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update_no_conflicts(self, mock_invoke, mock_invokerc):
+ mock_invokerc.return_value = True
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_called_once_with("csync2 -rm /etc/corosync.conf")
+ mock_invokerc.assert_called_once_with("csync2 -rxv /etc/corosync.conf")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update(self, mock_invoke, mock_invokerc, mock_warn):
+ mock_invokerc.side_effect = [False, False]
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_has_calls([
+ mock.call("csync2 -rm /etc/corosync.conf"),
+ mock.call("csync2 -rf /etc/corosync.conf")
+ ])
+ mock_invokerc.assert_has_calls([
+ mock.call("csync2 -rxv /etc/corosync.conf"),
+ mock.call("csync2 -rxv /etc/corosync.conf")
+ ])
+ mock_warn.assert_called_once_with("/etc/corosync.conf was not synced")
+
+ @mock.patch('crmsh.utils.InterfacesInfo')
+ def test_init_network(self, mock_interfaces):
+ mock_interfaces_inst = mock.Mock()
+ mock_interfaces.return_value = mock_interfaces_inst
+ mock_interfaces_inst.get_default_nic_list_from_route.return_value = ["eth0", "eth1"]
+ bootstrap._context = mock.Mock(ipv6=False, second_heartbeat=False, nic_list=["eth0", "eth1"], default_nic_list=["eth0", "eth1"])
+
+ bootstrap.init_network()
+
+ mock_interfaces.assert_called_once_with(False, False, bootstrap._context.nic_list)
+ mock_interfaces_inst.get_interfaces_info.assert_called_once_with()
+ mock_interfaces_inst.get_default_nic_list_from_route.assert_called_once_with()
+ mock_interfaces_inst.get_default_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.disable_service')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_no_config(self, mock_status, mock_disable):
+ bootstrap._context = mock.Mock(qdevice_inst=None)
+ bootstrap.init_qdevice()
+ mock_status.assert_not_called()
+ mock_disable.assert_called_once_with("corosync-qdevice.service")
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_copy_ssh_key_failed(
+ self,
+ mock_status, mock_check_ssh_passwd_need,
+ mock_configure_ssh_key, mock_ssh_copy_id, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_check_ssh_passwd_need.return_value = True
+ mock_ssh_copy_id.return_value = 255
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+
+ with self.assertRaises(ValueError):
+ bootstrap.init_qdevice()
+
+ mock_status.assert_has_calls([
+ mock.call("Configure Qdevice/Qnetd:"),
+ ])
+ mock_check_ssh_passwd_need.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_called_once_with('bob')
+ mock_ssh_copy_id.assert_called_once_with('bob', 'bob', '10.10.10.123')
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_already_configured(
+ self,
+ mock_status, mock_ssh, mock_configure_ssh_key,
+ mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
+
+ bootstrap.init_qdevice()
+
+ mock_status.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Qdevice is already configured - overwrite?")
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice(self, mock_info, mock_ssh, mock_configure_ssh_key, mock_qdevice_configured,
+ mock_this_node, mock_list_nodes, mock_adjust_priority, mock_adjust_fence_delay,
+ mock_user_of_host, mock_host_user_config_class):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_this_node.return_value = "192.0.2.100"
+ mock_list_nodes.return_value = []
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = False
+ self.qdevice_with_ip.set_cluster_name = mock.Mock()
+ self.qdevice_with_ip.valid_qnetd = mock.Mock()
+ self.qdevice_with_ip.config_and_start_qdevice = mock.Mock()
+
+ bootstrap.init_qdevice()
+
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_host_user_config_class.return_value.add.assert_has_calls([
+ mock.call('bob', '192.0.2.100'),
+ mock.call('bob', '10.10.10.123'),
+ ])
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ self.qdevice_with_ip.set_cluster_name.assert_called_once_with()
+ self.qdevice_with_ip.valid_qnetd.assert_called_once_with()
+ self.qdevice_with_ip.config_and_start_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_service_not_available(
+ self,
+ mock_info, mock_list_nodes, mock_available,
+ mock_host_user_config_class,
+ mock_fatal,
+ ):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
+ mock_list_nodes.return_value = ["node1"]
+ mock_available.return_value = False
+ mock_fatal.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.init_qdevice()
+
+ mock_host_user_config_class.return_value.save_local.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_not_called()
+ mock_fatal.assert_called_once_with("corosync-qdevice.service is not available on node1")
+ mock_available.assert_called_once_with("corosync-qdevice.service", "node1")
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ def test_configure_qdevice_interactive_return(self, mock_prompt):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ bootstrap.configure_qdevice_interactive()
+ mock_prompt.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_confirm(self, mock_confirm, mock_info):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = False
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_installed(self, mock_confirm, mock_info, mock_installed, mock_error):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.side_effect = [True, False]
+ mock_installed.side_effect = ValueError("corosync-qdevice not installed")
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_has_calls([
+ mock.call("Do you want to configure QDevice?"),
+ mock.call("Please install the package manually and press 'y' to continue")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive(self, mock_confirm, mock_info, mock_installed, mock_prompt, mock_qdevice):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_prompt.side_effect = ["alice@qnetd-node", 5403, "ffsplit", "lowest", "on", None]
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+ mock_prompt.assert_has_calls([
+ mock.call("HOST or IP of the QNetd server to be used",
+ valid_func=qdevice.QDevice.check_qnetd_addr),
+ mock.call("TCP PORT of QNetd server", default=5403,
+ valid_func=qdevice.QDevice.check_qdevice_port),
+ mock.call("QNetd decision ALGORITHM (ffsplit/lms)", default="ffsplit",
+ valid_func=qdevice.QDevice.check_qdevice_algo),
+ mock.call("QNetd TIE_BREAKER (lowest/highest/valid node id)", default="lowest",
+ valid_func=qdevice.QDevice.check_qdevice_tie_breaker),
+ mock.call("Whether using TLS on QDevice/QNetd (on/off/required)", default="on",
+ valid_func=qdevice.QDevice.check_qdevice_tls),
+ mock.call("Heuristics COMMAND to run with absolute path; For multiple commands, use \";\" to separate",
+ valid_func=qdevice.QDevice.check_qdevice_heuristics,
+ allow_empty=True)
+ ])
+ mock_qdevice.assert_called_once_with('qnetd-node', port=5403, ssh_user='alice', algo='ffsplit', tie_breaker='lowest', tls='on', cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_not_configured(self, mock_qdevice_configured, mock_error):
+ mock_qdevice_configured.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_error.assert_called_once_with("No QDevice configuration in this cluster")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_not_confirmed(self, mock_qdevice_configured, mock_confirm):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+
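+ # With the QDEVICE_RELOAD policy, removal disables and stops corosync-qdevice on
+ # every node and reloads corosync, per the invoked commands asserted below.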
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.qdevice.QDevice.remove_certification_files_on_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_config')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_reload(self, mock_qdevice_configured, mock_confirm, mock_reachable, mock_evaluate,
+ mock_status, mock_invoke, mock_status_long, mock_update_votes, mock_remove_config, mock_remove_db,
+ mock_remove_files, mock_adjust_priority, mock_adjust_fence_delay, mock_service_is_active):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = True
+ mock_evaluate.return_value = qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_service_is_active.return_value = False
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+ mock_reachable.assert_called_once_with()
+ mock_evaluate.assert_called_once_with(qdevice.QDEVICE_REMOVE)
+ mock_status.assert_has_calls([
+ mock.call("Disable corosync-qdevice.service"),
+ mock.call("Stopping corosync-qdevice.service")
+ ])
+ mock_invoke.assert_has_calls([
+ mock.call("crm cluster run 'systemctl disable corosync-qdevice'"),
+ mock.call("crm cluster run 'systemctl stop corosync-qdevice'"),
+ mock.call("crm cluster run 'crm corosync reload'")
+ ])
+ mock_status_long.assert_called_once_with("Removing QDevice configuration from cluster")
+ mock_update_votes.assert_called_once_with()
+ mock_remove_config.assert_called_once_with()
+ mock_remove_db.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_tls_on')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_start_qdevice_on_join_node(self, mock_status_long, mock_is_unicast, mock_add_nodelist,
+ mock_conf, mock_csync2_update, mock_invoke, mock_qdevice_tls,
+ mock_get_value, mock_qdevice, mock_start_service):
+ mock_is_unicast.return_value = False
+ mock_qdevice_tls.return_value = True
+ mock_conf.return_value = "corosync.conf"
+ mock_get_value.return_value = "10.10.10.123"
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+ mock_qdevice_inst.certificate_process_on_join = mock.Mock()
+
+ bootstrap.start_qdevice_on_join_node("node2")
+
+ mock_status_long.assert_called_once_with("Starting corosync-qdevice.service")
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_conf.assert_called_once_with()
+ mock_csync2_update.assert_called_once_with("corosync.conf")
+ mock_invoke.assert_called_once_with("crm corosync reload")
+ mock_qdevice_tls.assert_called_once_with()
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_qdevice.assert_called_once_with("10.10.10.123", cluster_node="node2")
+ mock_qdevice_inst.certificate_process_on_join.assert_called_once_with()
+ mock_start_service.assert_called_once_with("corosync-qdevice.service", enable=True)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.log.LoggerUtils.log_only_to_file')
+ def test_invoke(self, mock_log, mock_run):
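+ # invoke() maps the shell return code to a boolean success flag while logging
+ # the command line, stdout and stderr.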
+ mock_run.return_value = (0, "output", "error")
+ res = bootstrap.invoke("cmd --option")
+ self.assertEqual(res, (True, "output", "error"))
+ mock_log.assert_has_calls([
+ mock.call('invoke: cmd --option'),
+ mock.call('stdout: output'),
+ mock.call('stderr: error')
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_invokerc(self, mock_invoke):
+ mock_invoke.return_value = (True, None, None)
+ res = bootstrap.invokerc("cmd")
+ self.assertEqual(res, True)
+ mock_invoke.assert_called_once_with("cmd")
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('os.path.isfile')
+ def test_sync_files_to_disk(self, mock_isfile, mock_cluster_cmd):
+ bootstrap.FILES_TO_SYNC = ("file1", "file2")
+ mock_isfile.side_effect = [True, True]
+ bootstrap.sync_files_to_disk()
+ mock_isfile.assert_has_calls([mock.call("file1"), mock.call("file2")])
+ mock_cluster_cmd.assert_called_once_with("sync file1 file2")
+
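+ # adjust_pcmk_delay_max(True) should add pcmk_delay_max to fencing resources that
+ # lack it; adjust_pcmk_delay_max(False) should delete it where present.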
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay_2node(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(True)
+ mock_run.assert_called_once_with("crm resource param res_1 set pcmk_delay_max {}s".format(constants.PCMK_DELAY_MAX))
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(False)
+ mock_run.assert_called_once_with("crm resource param res_1 delete pcmk_delay_max")
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout_sbd(self, mock_is_active, mock_sbd_timeout):
+ mock_is_active.return_value = True
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration = mock.Mock()
+ bootstrap.adjust_stonith_timeout()
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.bootstrap.get_stonith_timeout_generally_expected')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout(self, mock_is_active, mock_get_timeout, mock_set):
+ mock_is_active.return_value = False
+ mock_get_timeout.return_value = 30
+ bootstrap.adjust_stonith_timeout()
+ mock_set.assert_called_once_with("stonith-timeout", 30, conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults_2node(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(True)
+ mock_set.assert_called_once_with('priority', 1, property_type='rsc_defaults', conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(False)
+ mock_set.assert_called_once_with('priority', 0, property_type='rsc_defaults')
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_fence_agent(self, mock_run):
+ mock_run.return_value = None
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_pcmk_delay(self, mock_run, mock_set):
+ mock_run.return_value = "data"
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+ mock_set.assert_called_once_with("priority-fencing-delay", 0)
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties_no_service(self, mock_is_active):
+ mock_is_active.return_value = False
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.adjust_stonith_timeout')
+ @mock.patch('crmsh.bootstrap.adjust_pcmk_delay_max')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties(self, mock_is_active, mock_2node_qdevice, mock_adj_pcmk, mock_adj_stonith, mock_adj_priority, mock_adj_fence):
+ mock_is_active.return_value = True
+ mock_2node_qdevice.return_value = True
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+ mock_adj_pcmk.assert_called_once_with(True)
+ mock_adj_stonith.assert_called_once_with()
+ mock_adj_priority.assert_called_once_with(True)
+ mock_adj_fence.assert_called_once_with(True)
+
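+ # sync_file() should copy directly to the cluster nodes when csync2 is skipped,
+ # and go through csync2_update() otherwise.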
+ @mock.patch('crmsh.utils.cluster_copy_file')
+ def test_sync_file_skip_csync2(self, mock_copy):
+ bootstrap._context = mock.Mock(skip_csync2=True, node_list_in_cluster=["node1", "node2"])
+ bootstrap.sync_file("/file1")
+ mock_copy.assert_called_once_with("/file1", nodes=["node1", "node2"], output=False)
+
+ @mock.patch('crmsh.bootstrap.csync2_update')
+ def test_sync_file(self, mock_csync2_update):
+ bootstrap._context = mock.Mock(skip_csync2=False)
+ bootstrap.sync_file("/file1")
+ mock_csync2_update.assert_called_once_with("/file1")
+
+
+class TestValidation(unittest.TestCase):
+ """
+ Unit tests for the bootstrap.Validation class
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.validate_inst = bootstrap.Validation("10.10.10.1")
+ self.validate_port_inst_in_use = bootstrap.Validation("4567", ["4568"])
+ self.validate_port_inst_out_of_range = bootstrap.Validation("456766")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.IP.is_mcast')
+ def test_is_mcast_addr(self, mock_mcast):
+ mock_mcast.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_mcast_addr()
+ self.assertEqual("10.10.10.1 is not multicast address", str(err.exception))
+ mock_mcast.assert_called_once_with("10.10.10.1")
+
+ def test_is_local_addr(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_local_addr(["20.20.20.1", "20.20.20.2"])
+ self.assertEqual("Address must be a local address (one of ['20.20.20.1', '20.20.20.2'])", str(err.exception))
+
+ def test_is_valid_port_in_use(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_in_use._is_valid_port()
+ self.assertEqual("Port 4567 is already in use by corosync. Leave a gap between multiple rings.", str(err.exception))
+
+ def test_is_valid_port_out_of_range(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_out_of_range._is_valid_port()
+ self.assertEqual("Valid port range should be 1025-65535", str(err.exception))
+
+ @mock.patch('crmsh.bootstrap.Validation._is_mcast_addr')
+ def test_valid_mcast_address(self, mock_mcast):
+ bootstrap.Validation.valid_mcast_address("10.10.10.1")
+ mock_mcast.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_ucast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"])
+ bootstrap.Validation.valid_ucast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_mcast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"],
+ local_network_list=["10.10.10.0"])
+ bootstrap.Validation.valid_mcast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3", "10.10.10.0"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_valid_port')
+ def test_valid_port(self, mock_port):
+ bootstrap.Validation.valid_port("10.10.10.1")
+ mock_port.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ def test_valid_admin_ip_in_use(self, mock_ipv6, mock_invoke):
+ mock_ipv6.return_value = False
+ mock_invoke.return_value = True
+
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst.valid_admin_ip("10.10.10.1")
+ self.assertEqual("Address already in use: 10.10.10.1", str(err.exception))
+
+ mock_ipv6.assert_called_once_with("10.10.10.1")
+ mock_invoke.assert_called_once_with("ping -c 1 10.10.10.1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_cluster_is_active(self, mock_context, mock_init, mock_active,
+ mock_error):
+ mock_context_inst = mock.Mock()
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Cluster is not active - can't execute removing action")
+
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_error.assert_not_called()
+ mock_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice_cluster_node(self, mock_context, mock_init, mock_active, mock_error):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node="node1")
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Either remove node or qdevice")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_cluster_node(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_status, mock_prompt):
+ mock_context_inst = mock.Mock(yes_to_all=False, cluster_node=None, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_prompt.return_value = None
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_status.assert_called_once_with('Remove This Node from Cluster:\n You will be asked for the IP address or name of an existing node,\n which will be removed from the cluster. This command must be\n executed from a different node in the cluster.\n')
+ mock_prompt.assert_called_once_with("IP address or hostname of cluster node (e.g.: 192.168.1.1)", ".+")
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_confirm(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = False
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_error.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self_need_force(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = True
+ mock_this_node.return_value = "node1"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Removing self requires --force")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.remove_self')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_hostname.return_value = "node1"
+ mock_this_node.return_value = "node1"
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_not_called()
+ mock_self.assert_called_once_with(True)
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node1')
+
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_not_in_cluster(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_list):
+ mock_context_inst = mock.Mock(cluster_node="node2", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node3"]
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Specified node node2 is not configured in cluster! Unable to remove.")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node')
+ @mock.patch('crmsh.bootstrap.remove_node_from_cluster')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node,
+ mock_list, mock_remove, mock_fetch, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node2", qdevice_rm_flag=None, force=True)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, False]
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node2"]
+ mock_fetch.return_value = ["node1", "node2"]
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_error.assert_not_called()
+ mock_remove.assert_called_once_with('node2')
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node2')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ def test_remove_self_other_nodes(self, mock_this_node, mock_list, mock_run, mock_error):
+ mock_this_node.return_value = 'node1'
+ mock_list.return_value = ["node1", "node2"]
+ mock_run.return_value = (1, '', 'err')
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(cluster_node="node1", yes_to_all=True)
+ bootstrap.remove_self()
+
+ mock_list.assert_called_once_with(include_remote_nodes=False)
+ mock_run.assert_called_once_with("node2", "crm cluster remove -y -c node1")
+ mock_error.assert_called_once_with("Failed to remove this node from node2")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_rm_configuration_files(self, mock_run, mock_installed):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ mock_installed.return_value = True
+ bootstrap.rm_configuration_files()
+ mock_run.assert_has_calls([
+ mock.call('rm -f file1 file2', None),
+ mock.call('cp /usr/share/fillup-templates/sysconfig.sbd /etc/sysconfig/sbd', None)
+ ])
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip_host(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["node1", "node2"]
+ self.assertIsNone(bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_not_called()
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_get_iplist.return_value = ["10.10.10.1"]
+ self.assertEqual("10.10.10.1", bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_called_once_with('node1')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_stop_services(self, mock_active, mock_status, mock_stop):
+ mock_active.side_effect = [True, True, True, True]
+ bootstrap.stop_services(bootstrap.SERVICES_STOP_LIST)
+ mock_active.assert_has_calls([
+ mock.call("corosync-qdevice.service", remote_addr=None),
+ mock.call("corosync.service", remote_addr=None),
+ mock.call("hawk.service", remote_addr=None),
+ mock.call("csync2.socket", remote_addr=None)
+ ])
+ mock_status.assert_has_calls([
+ mock.call('Stopping the %s%s', 'corosync-qdevice.service', ''),
+ mock.call('Stopping the %s%s', 'corosync.service', ''),
+ mock.call('Stopping the %s%s', 'hawk.service', ''),
+ mock.call('Stopping the %s%s', 'csync2.socket', '')
+ ])
+ mock_stop.assert_has_calls([
+ mock.call("corosync-qdevice.service", disable=True, remote_addr=None),
+ mock.call("corosync.service", disable=True, remote_addr=None),
+ mock.call("hawk.service", disable=True, remote_addr=None),
+ mock.call("csync2.socket", disable=True, remote_addr=None)
+ ])
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_node_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_error.assert_called_once_with("Failed to remove node1.")
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_csync_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_invokerc, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = True
+ mock_invokerc.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invokerc.assert_has_calls([
+ mock.call("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ ])
+ mock_error.assert_called_once_with("Removing the node node1 from {} failed".format(bootstrap.CSYNC2_CFG))
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.bootstrap.decrease_expected_votes')
+ @mock.patch('crmsh.corosync.del_node')
+ @mock.patch('crmsh.corosync.get_values')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
+ mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_decrease, mock_csync2,
+ mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_call_delnode):
+ mock_get_ip.return_value = "10.10.10.1"
+ mock_call_delnode.return_value = True
+ mock_invoke.side_effect = [(True, None, None)]
+ mock_invokerc.return_value = True
+ mock_get_values.return_value = ["10.10.10.1"]
+ mock_is_active.return_value = False
+
+ bootstrap._context = mock.Mock(cluster_node="node1", rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_has_calls([
+ mock.call("Removing the node node1"),
+ mock.call("Propagating configuration changes across the remaining nodes")
+ ])
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invoke.assert_has_calls([
+ mock.call("corosync-cfgtool -R")
+ ])
+ mock_invokerc.assert_called_once_with("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ mock_error.assert_not_called()
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_del.assert_called_once_with("10.10.10.1")
+ mock_decrease.assert_called_once_with()
+ mock_csync2.assert_has_calls([
+ mock.call(bootstrap.CSYNC2_CFG),
+ mock.call("/etc/corosync/corosync.conf")
+ ])
diff --git a/test/unittests/test_bugs.py b/test/unittests/test_bugs.py
new file mode 100644
index 0000000..725b020
--- /dev/null
+++ b/test/unittests/test_bugs.py
@@ -0,0 +1,893 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import cibconfig
+from lxml import etree
+from crmsh import xmlutil
+
+factory = cibconfig.cib_factory
+
+
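+# Each test runs against a pushed copy of the shared cib_factory state, which
+# teardown_function() pops again so objects created here never leak between tests.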
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+ factory._push_state()
+
+
+def teardown_function():
+ factory._pop_state()
+
+
+def test_bug41660_1():
+ xml = """<primitive id="bug41660" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="bug41660-meta"> \
+ <nvpair id="bug41660-meta-target-role" name="target-role" value="Stopped"/> \
+ </meta_attributes> \
+ </primitive>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive bug41660 ocf:pacemaker:Dummy meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
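+ # Stub out factory.commit so set_deep_meta_attr() only mutates the in-memory CIB.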
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("bug41660", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_2():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta">
+ <nvpair id="libvirtd-interleave" name="interleave" value="true"/>
+ <nvpair id="libvirtd-ordered" name="ordered" value="true"/>
+ <nvpair id="libvirtd-clone-meta-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ # data = obj.repr_cli(format_mode=-1)
+ # print(data)
+ # exp = 'clone libvirtd-clone libvirtd meta interleave=true ordered=true target-role=Stopped'
+ # assert data == exp
+ # assert obj.cli_use_validate()
+
+ print(etree.tostring(obj.node))
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ print("PRE", etree.tostring(obj.node))
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ print("POST", etree.tostring(obj.node))
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_3():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta_attributes">
+ <nvpair id="libvirtd-clone-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone libvirtd-clone libvirtd meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_comments():
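+ # sanitize_cib() must preserve XML comments; all three COMMENT TEXT markers should survive.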
+ xml = """<cib epoch="25" num_updates="1" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Thu Mar 6 15:53:49 2014" update-origin="beta1" update-client="cibadmin" update-user="root" crm_feature_set="3.0.8" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.11-3.3-3ca8c3b"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <!--# COMMENT TEXT 1 -->
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="beta1" id="1">
+ <!--# COMMENT TEXT 2 -->
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <!--# COMMENT TEXT 3 -->
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="beta1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources/>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-shutdown" name="shutdown" value="0"/>
+ <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>"""
+ elems = etree.fromstring(xml)
+ xmlutil.sanitize_cib(elems)
+ assert xmlutil.xml_tostring(elems).count("COMMENT TEXT") == 3
+
+
+def test_eq1():
+ xml1 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ xml2 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ e1 = etree.fromstring(xml1)
+ e2 = etree.fromstring(xml2)
+ assert xmlutil.xml_equals(e1, e2, show=True)
+
+
+def test_pcs_interop_1():
+ """
+ pcs <-> crmsh interoperability bug
+ """
+
+ xml = """<clone id="dummies">
+ <meta_attributes id="dummies-meta">
+ <nvpair name="globally-unique" value="false" id="dummies-meta-globally-unique"/>
+ </meta_attributes>
+ <meta_attributes id="dummies-meta_attributes">
+ <nvpair id="dummies-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="dummy-1" class="ocf" provider="heartbeat" type="Dummy"/>
+ </clone>"""
+ elem = etree.fromstring(xml)
+ from crmsh.ui_resource import set_deep_meta_attr_node
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+ print("BEFORE:", etree.tostring(elem))
+
+ set_deep_meta_attr_node(elem, 'target-role', 'Stopped')
+
+ print("AFTER:", etree.tostring(elem))
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+
+def test_bnc878128():
+ """
+ L3: "crm configure show" displays XML information instead of typical crm output.
+ """
+ xml = """<rsc_location id="cli-prefer-dummy-resource" rsc="dummy-resource"
+role="Started">
+ <rule id="cli-prefer-rule-dummy-resource" score="INFINITY">
+ <expression id="cli-prefer-expr-dummy-resource" attribute="#uname"
+operation="eq" value="x64-4"/>
+ <date_expression id="cli-prefer-lifetime-end-dummy-resource" operation="lt"
+end="2014-05-17 17:56:11Z"/>
+ </rule>
+</rsc_location>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'location cli-prefer-dummy-resource dummy-resource role=Started rule #uname eq x64-4 and date lt "2014-05-17 17:56:11Z"'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_order_without_score_kind():
+ """
+ Spec says order doesn't require score or kind to be set
+ """
+ xml = '<rsc_order first="a" first-action="promote" id="order-a-b" then="b" then-action="start"/>'
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'order order-a-b a:promote b:start'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_bnc878112():
+ """
+ crm configure group can hijack a cloned primitive (and then crash)
+ """
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('group', 'g1', 'p1')
+ assert obj2 is True
+ obj3 = factory.create_object('group', 'g2', 'p1')
+ print(obj3)
+ assert obj3 is False
+
+
+def test_copy_nvpairs():
+ from crmsh.cibconfig import copy_nvpairs
+
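+ # copy_nvpairs() should update the value of an nvpair that already exists
+ # rather than append a duplicate entry.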
+ to = etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ ''')
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="false"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['false'] == to.xpath('./nvpair/@value')
+
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['true'] == to.xpath('./nvpair/@value')
+
+
+def test_pengine_test():
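+ # Rule-based instance_attributes should round-trip into separate "params rule ..." blocks in the CLI output.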
+ xml = '''<primitive class="ocf" id="rsc1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="rsc1-instance_attributes-1">
+ <nvpair id="rsc1-instance_attributes-1-state" name="state" value="/var/run/Dummy-rsc1-clusterA"/>
+ <rule id="rsc1-instance_attributes-1-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-1-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterA"/>
+ </rule>
+ </instance_attributes>
+ <instance_attributes id="rsc1-instance_attributes-2">
+ <nvpair id="rsc1-instance_attributes-2-state" name="state" value="/var/run/Dummy-rsc1-clusterB"/>
+ <rule id="rsc1-instance_attributes-2-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-2-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterB"/>
+ </rule>
+ </instance_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc1 ocf:pacemaker:Dummy params rule 0: #cluster-name eq clusterA state="/var/run/Dummy-rsc1-clusterA" params rule 0: #cluster-name eq clusterB state="/var/run/Dummy-rsc1-clusterB" op monitor interval=10'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_tagset():
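+ # get_elems_on_tag("tag:t0") should resolve only the members referenced by the tag (r1 and r2, not r3).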
+ xml = '''<primitive class="ocf" id="%s" provider="pacemaker" type="Dummy"/>'''
+ tag = '''<tag id="t0"><obj_ref id="r1"/><obj_ref id="r2"/></tag>'''
+ factory.create_from_node(etree.fromstring(xml % ('r1')))
+ factory.create_from_node(etree.fromstring(xml % ('r2')))
+ factory.create_from_node(etree.fromstring(xml % ('r3')))
+ factory.create_from_node(etree.fromstring(tag))
+ elems = factory.get_elems_on_tag("tag:t0")
+ assert set(x.obj_id for x in elems) == set(['r1', 'r2'])
+
+
+def test_op_role():
+ xml = '''<primitive class="ocf" id="rsc2" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor" role="Stopped"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc2 ocf:pacemaker:Dummy op monitor interval=10 role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nvpair_no_value():
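+ # A valueless nvpair renders as a bare name, while empty and whitespace-only values keep their quotes.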
+ xml = '''<primitive class="ocf" id="rsc3" provider="heartbeat" type="Dummy">
+ <instance_attributes id="rsc3-instance_attributes-1">
+ <nvpair id="rsc3-instance_attributes-1-verbose" name="verbose"/>
+ <nvpair id="rsc3-instance_attributes-1-verbase" name="verbase" value=""/>
+ <nvpair id="rsc3-instance_attributes-1-verbese" name="verbese" value=" "/>
+ </instance_attributes>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc3 Dummy params verbose verbase="" verbese=" "'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_delete_ticket():
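+ # Deleting one referenced resource should not delete the rsc_ticket constraint itself.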
+ xml0 = '<primitive id="daa0" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml1 = '<primitive id="daa1" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml2 = '''<rsc_ticket id="taa0" ticket="taaA">
+ <resource_set id="taa0-0">
+ <resource_ref id="daa0"/>
+ <resource_ref id="daa1"/>
+ </resource_set>
+ </rsc_ticket>'''
+ for x in (xml0, xml1, xml2):
+ data = etree.fromstring(x)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+
+ factory.delete('daa0')
+ assert factory.find_object('daa0') is None
+ assert factory.find_object('taa0') is not None
+
+
+def test_quotes():
+ """
+ Parsing escaped quotes
+ """
+ xml = '''<primitive class="ocf" id="q1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="q1-instance_attributes-1">
+ <nvpair id="q1-instance_attributes-1-state" name="state" value="foo&quot;foo&quot;"/>
+ </instance_attributes>
+ </primitive>
+ '''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive q1 ocf:pacemaker:Dummy params state="foo\\"foo\\""'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs():
+ """
+ bug with parsing node attrs
+ """
+ xml = '''<node id="1" uname="dell71"> \
+ <instance_attributes id="dell71-instance_attributes"> \
+ <nvpair name="staging-0-0-placement" value="true" id="dell71-instance_attributes-staging-0-0-placement"/> \
+ <nvpair name="meta-0-0-placement" value="true" id="dell71-instance_attributes-meta-0-0-placement"/> \
+ </instance_attributes> \
+ <instance_attributes id="nodes-1"> \
+ <nvpair id="nodes-1-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>'''
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node 1: dell71 attributes staging-0-0-placement=true meta-0-0-placement=true attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs2():
+ xml = """<node id="h04" uname="h04"> \
+ <utilization id="h04-utilization"> \
+ <nvpair id="h04-utilization-utl_ram" name="utl_ram" value="1200"/> \
+ <nvpair id="h04-utilization-utl_cpu" name="utl_cpu" value="200"/> \
+ </utilization> \
+ <instance_attributes id="nodes-h04"> \
+ <nvpair id="nodes-h04-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node h04 utilization utl_ram=1200 utl_cpu=200 attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_group_constraint_location():
+ """
+ configuring a location constraint on a grouped resource is OK
+ """
+ factory.create_object('node', 'node1')
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('location', 'loc-p1', 'p1', 'inf:', 'node1')
+ c = factory.find_object('loc-p1')
+ assert c and c.check_sanity() == 0
+
+
+def test_group_constraint_colocation():
+ """
+ configuring a colocation constraint on a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2', 'inf:', 'p1', 'p2')
+ c = factory.find_object('coloc-p1-p2')
+ assert c and c.check_sanity() > 0
+
+
+def test_group_constraint_colocation_rscset():
+ """
+ configuring a colocation constraint with a resource set on a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_clone_constraint_colocation_rscset():
+ """
+ configuring a colocation constraint with a resource set on a cloned resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('clone', 'c1', 'p1')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_existing_node_resource():
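+ # A node and a resource may share the id "ha-one"; find_node()/find_resource() must return distinct elements.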
+ factory.create_object('primitive', 'ha-one', 'Dummy')
+
+ n = factory.find_node('ha-one')
+ assert factory.test_element(n)
+
+ r = factory.find_resource('ha-one')
+ assert factory.test_element(r)
+
+ assert n != r
+
+ assert factory.check_structure()
+ factory.cli_use_validate_all()
+
+ ok, s = factory.mkobj_set('ha-one')
+ assert ok
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_existing_node_resource_2(mock_incr, mock_line_num):
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ from crmsh import clidisplay
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text += "\nprimitive ha-one Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ text2 = obj.repr()
+
+ assert sorted(text.split('\n')) == sorted(text2.split('\n'))
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_1(mock_incr, mock_line_num):
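+ # Apply a series of overlapping CIB edits, then restore the saved original and verify it renders identically.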
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node node1
+primitive p0 ocf:pacemaker:Dummy
+primitive p1 ocf:pacemaker:Dummy
+primitive p2 ocf:heartbeat:Delay \
+ params startdelay=2 mondelay=2 stopdelay=2
+primitive p3 ocf:pacemaker:Dummy
+primitive st stonith:null params hostlist=node1
+clone c1 p1
+ms m1 p2
+op_defaults timeout=60s
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\nprimitive p2 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\ngroup g1 p1 p2"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text.replace("group g1 p1 p2", "group g1 p1 p3")
+ text = text + "\nprimitive p3 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ print(obj.repr().strip())
+ assert obj.repr().strip() == "group g1 p1 p3"
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_3(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""node node1
+primitive node1 Dummy params fake=something
+ """)
+ assert ok
+
+ print("** baseline")
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ print(obj.repr())
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""primitive node1 Dummy params fake=something-else
+ """, remove=False, method='update')
+ assert ok
+
+ print("** end")
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib, remove=True, method='replace')
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_2(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node 168633610: webui
+node 168633611: node1
+rsc_template web-server apache \
+ params port=8000 \
+ op monitor interval=10s
+primitive d0 Dummy \
+ meta target-role=Started
+primitive d1 Dummy
+primitive d2 Dummy
+# Never use this STONITH agent in production!
+primitive development-stonith stonith:null \
+ params hostlist="webui node1 node2 node3"
+primitive proxy systemd:haproxy \
+ op monitor interval=10s
+primitive proxy-vip IPaddr2 \
+ params ip=10.13.37.20
+primitive srv1 @web-server
+primitive srv2 @web-server
+primitive vip1 IPaddr2 \
+ params ip=10.13.37.21 \
+ op monitor interval=20s
+primitive vip2 IPaddr2 \
+ params ip=10.13.37.22 \
+ op monitor interval=20s
+primitive virtual-ip IPaddr2 \
+ params ip=10.13.37.77 lvs_support=false \
+ op start timeout=20 interval=0 \
+ op stop timeout=20 interval=0 \
+ op monitor interval=10 timeout=20
+primitive yet-another-virtual-ip IPaddr2 \
+ params ip=10.13.37.72 cidr_netmask=24 \
+ op start interval=0 timeout=20 \
+ op stop interval=0 timeout=20 \
+ op monitor interval=10 timeout=20 \
+ meta target-role=Started
+group dovip d0 virtual-ip \
+ meta target-role=Stopped
+group g-proxy proxy-vip proxy
+group g-serv1 vip1 srv1
+group g-serv2 vip2 srv2
+clone d2-clone d2 \
+ meta target-role=Started
+tag dummytag d0 d1 d1-on-node1 d2 d2-clone
+# Never put the two web servers on the same node
+colocation co-serv -inf: g-serv1 g-serv2
+location d1-on-node1 d1 inf: node1
+# Never put any web server or haproxy on webui
+location l-avoid-webui { g-proxy g-serv1 g-serv2 } -inf: webui
+# Prefer to spread groups across nodes
+location l-proxy g-proxy 200: node1
+location l-serv1 g-serv1 200: node2
+location l-serv2 g-serv2 200: node3
+property cib-bootstrap-options: \
+ have-watchdog=false \
+ dc-version="1.1.13+git20150917.20c2178-224.2-1.1.13+git20150917.20c2178" \
+ cluster-infrastructure=corosync \
+ cluster-name=hacluster \
+ stonith-enabled=true \
+ no-quorum-policy=ignore
+rsc_defaults rsc-options: \
+ resource-stickiness=1 \
+ migration-threshold=3
+op_defaults op-options: \
+ timeout=600 \
+ record-pending=true
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+def test_bug_110():
+ """
+ configuring attribute-based fencing-topology
+ """
+ factory.create_object(*"primitive stonith-libvirt stonith:null".split())
+ factory.create_object(*"primitive fence-nova stonith:null".split())
+ cmd = "fencing_topology attr:OpenStack-role=compute stonith-libvirt,fence-nova".split()
+ ok = factory.create_object(*cmd)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ for o in obj.obj_set:
+ if o.node.tag == 'fencing-topology':
+ assert o.check_sanity() == 0
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_reordering_resource_sets(mock_incr, mock_line_num):
+ """
+ Can we reorder resource sets?
+ """
+ from crmsh import clidisplay
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('primitive', 'p2', 'Dummy')
+ assert obj2 is True
+ obj3 = factory.create_object('primitive', 'p3', 'Dummy')
+ assert obj3 is True
+ obj4 = factory.create_object('primitive', 'p4', 'Dummy')
+ assert obj4 is True
+ o1 = factory.create_object('order', 'o1', 'p1', 'p2', 'p3', 'p4')
+ assert o1 is True
+
+ obj = cibconfig.mkset_obj('o1')
+ assert obj is not None
+ rc = obj.save('order o1 p4 p3 p2 p1')
+    assert rc is True
+
+ obj2 = cibconfig.mkset_obj('o1')
+ with clidisplay.nopretty():
+ assert "order o1 p4 p3 p2 p1" == obj2.repr().strip()
+
+
+def test_bug959895():
+ """
+ Allow importing XML with cloned groups
+ """
+ xml = """<clone id="c-bug959895">
+ <group id="g-bug959895">
+ <primitive id="p-bug959895-a" class="ocf" provider="pacemaker" type="Dummy" />
+ <primitive id="p-bug959895-b" class="ocf" provider="pacemaker" type="Dummy" />
+ </group>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone c-bug959895 g-bug959895'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("c-bug959895", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_node_util_attr():
+ """
+    Handle a node with utilization listed before attributes correctly
+ """
+ xml = """<node id="aberfeldy" uname="aberfeldy">
+ <utilization id="nodes-aberfeldy-utilization">
+ <nvpair id="nodes-aberfeldy-utilization-cpu" name="cpu" value="2"/>
+ <nvpair id="nodes-aberfeldy-utilization-memory" name="memory" value="500"/>
+ </utilization>
+ <instance_attributes id="nodes-aberfeldy">
+ <nvpair id="nodes-aberfeldy-standby" name="standby" value="on"/>
+ </instance_attributes>
+</node>"""
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'node aberfeldy utilization cpu=2 memory=500 attributes standby=on'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_dup_create_same_name():
+ """
+ Creating two objects with the same name
+ """
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert ok
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert not ok
+
+
+def test_dup_create():
+ """
+ Creating property sets with unknown properties
+ """
+ ok = factory.create_object(*"property hana_test1: hana_attribute_1=5 hana_attribute_2=mohican".split())
+ assert ok
+ ok = factory.create_object(*"property hana_test2: hana_attribute_1=5s a-b-c-d=e-f-g".split())
+ assert ok
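+# Note: both creations above succeed -- unknown property names are accepted
+# here (crmsh presumably warns rather than rejects), which is why the test
+# only asserts ok.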
diff --git a/test/unittests/test_cib.py b/test/unittests/test_cib.py
new file mode 100644
index 0000000..def915f
--- /dev/null
+++ b/test/unittests/test_cib.py
@@ -0,0 +1,32 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+from crmsh import cibconfig
+from lxml import etree
+import copy
+
+factory = cibconfig.cib_factory
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
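+# Note: cib_objects is stashed and restored around change_schema() so that,
+# presumably, the schema switch is exercised without revalidating every
+# parsed object; afterwards only the validate-with attribute is checked.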
+def test_cib_schema_change():
+ "Changing the validate-with CIB attribute"
+ copy_of_cib = copy.copy(factory.cib_orig)
+ print(etree.tostring(copy_of_cib, pretty_print=True))
+ tmp_cib_objects = factory.cib_objects
+ factory.cib_objects = []
+ factory.change_schema("pacemaker-1.1")
+ factory.cib_objects = tmp_cib_objects
+ factory._copy_cib_attributes(copy_of_cib, factory.cib_orig)
+ assert factory.cib_attrs["validate-with"] == "pacemaker-1.1"
+ assert factory.cib_elem.get("validate-with") == "pacemaker-1.1"
diff --git a/test/unittests/test_cliformat.py b/test/unittests/test_cliformat.py
new file mode 100644
index 0000000..2eb25b5
--- /dev/null
+++ b/test/unittests/test_cliformat.py
@@ -0,0 +1,324 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for cliformat.py
+
+from crmsh import cibconfig
+from crmsh import parse
+from lxml import etree
+from .test_parse import MockValidation
+
+factory = cibconfig.cib_factory
+
+
+def assert_is_not_none(thing):
+ assert thing is not None, "Expected non-None value"
+
+
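+# roundtrip() is the workhorse of this module: it parses a CLI string into
+# XML, creates a CIB object from it, renders it back to CLI twice (once
+# with obj.nocli forced on, once off) and asserts that the second rendering
+# matches the input (or the explicit `expected` string). Passing debug=True
+# deliberately fails the final assert so the printed GOT/EXP lines show up
+# in the test output.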
+def roundtrip(cli, debug=False, expected=None, format_mode=-1, strip_color=False):
+ parse.validator = MockValidation()
+ node, _, _ = cibconfig.parse_cli_to_xml(cli)
+ assert_is_not_none(node)
+ obj = factory.find_object(node.get("id"))
+ if obj:
+ factory.delete(node.get("id"))
+ obj = factory.create_from_node(node)
+ assert_is_not_none(obj)
+ obj.nocli = True
+ xml = obj.repr_cli(format_mode=format_mode)
+ print(xml)
+ obj.nocli = False
+ s = obj.repr_cli(format_mode=format_mode)
+ if strip_color:
+ import re
+ s = re.sub(r"\$\{[^}]+\}", "", s)
+ if (s != cli) or debug:
+ print("GOT:", s)
+ print("EXP:", cli)
+ assert obj.cli_use_validate()
+ if expected is not None:
+ assert expected == s
+ else:
+ assert cli == s
+ assert not debug
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ "tear down test fixtures"
+
+
+def test_rscset():
+ roundtrip('colocation foo inf: a b')
+ roundtrip('order order_2 Mandatory: [ A B ] C')
+ roundtrip('rsc_template public_vm Xen')
+
+
+''' Seems to rely on the cluster environment; should live in the functional tests
+def test_normalize():
+ """
+ Test automatic normalization of parameter names:
+ "shutdown_timeout" is a parameter name, but
+ "shutdown-timeout" is not.
+ """
+ roundtrip('primitive vm1 Xen params shutdown-timeout=0',
+ expected='primitive vm1 Xen params shutdown_timeout=0')
+'''
+
+
+def test_group():
+ factory.create_from_cli('primitive p1 Dummy')
+ roundtrip('group g1 p1 params target-role=Stopped')
+
+
+def test_bnc863736():
+ roundtrip('order order_3 Mandatory: [ A B ] C symmetrical=true')
+
+
+def test_sequential():
+ roundtrip('colocation rsc_colocation-master inf: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]')
+
+
+def test_broken_colo():
+ xml = """<rsc_colocation id="colo-2" score="INFINITY">
+ <resource_set id="colo-2-0" require-all="false">
+ <resource_ref id="vip1"/>
+ <resource_ref id="vip2"/>
+ </resource_set>
+ <resource_set id="colo-2-1" require-all="false" role="Master">
+ <resource_ref id="apache"/>
+ </resource_set>
+</rsc_colocation>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ assert 'colocation colo-2 inf: [ vip1 vip2 sequential=true ] [ apache:Master sequential=true ]' == data
+ assert obj.cli_use_validate()
+
+
+def test_comment():
+ roundtrip("# comment 1\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_comment2():
+ roundtrip("# comment 1\n# comment 2\n# comment 3\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_nvpair_ref1():
+ factory.create_from_cli("primitive dummy-0 Dummy params $fiz:buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @fiz:boz')
+
+
+def test_idresolve():
+ factory.create_from_cli("primitive dummy-5 Dummy params buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @dummy-5-instance_attributes-buz')
+
+
+def test_ordering():
+ xml = """<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy-monitor-60"/> \
+ </operations> \
+ <meta_attributes id="dummy-meta_attributes"> \
+ <nvpair id="dummy-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy ocf:pacemaker:Dummy op start timeout=60 interval=0 op stop timeout=60 interval=0 op monitor interval=60 timeout=30 meta target-role=Stopped'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_ordering2():
+ xml = """<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="dummy2-meta_attributes"> \
+ <nvpair id="dummy2-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy2-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy2-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy2-monitor-60"/> \
+ </operations> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy2 ocf:pacemaker:Dummy meta target-role=Stopped ' \
+ 'op start timeout=60 interval=0 op stop timeout=60 interval=0 ' \
+ 'op monitor interval=60 timeout=30'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing():
+ xml = """<fencing-topology>
+ <fencing-level devices="st1" id="fencing" index="1"
+target="ha-three"></fencing-level>
+ <fencing-level devices="st1" id="fencing-0" index="1"
+target="ha-two"></fencing-level>
+ <fencing-level devices="st1" id="fencing-1" index="1"
+target="ha-one"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology st1'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing2():
+ xml = """<fencing-topology>
+ <fencing-level devices="apple" id="fencing" index="1"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="2"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="1"
+target-pattern="red.*"></fencing-level>
+ <fencing-level devices="apple" id="fencing" index="2"
+target-pattern="red.*"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology pattern:green.* apple pear pattern:red.* pear apple'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_master():
+ xml = """<master id="ms-1">
+ <crmsh-ref id="dummy3" />
+ </master>
+ """
+ data = etree.fromstring(xml)
+ factory.create_from_cli("primitive dummy3 ocf:pacemaker:Dummy")
+ data, _, _ = cibconfig.postprocess_cli(data)
+ print("after postprocess:", etree.tostring(data))
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ assert obj.cli_use_validate()
+
+
+def test_param_rules():
+ roundtrip('primitive foo Dummy ' +
+ 'params rule #uname eq wizbang laser=yes ' +
+ 'params rule #uname eq gandalf staff=yes')
+
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: rule #uname eq node1 interface=eth1 ' +
+ 'params 2: rule #uname eq node2 interface=eth2 port=8888 ' +
+ 'params 1: interface=eth0 port=9999')
+
+
+def test_operation_rules():
+ roundtrip('primitive test Dummy ' +
+ 'op start interval=0 '
+ 'op_params 2: rule #uname eq node1 fake=fake ' +
+ 'op_params 1: fake=real ' +
+ 'op_meta 2: rule #ra-version version:gt 1.0 timeout=120s ' +
+ 'op_meta 1: timeout=60s')
+
+
+def test_multiple_attrsets():
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: interface=eth1 ' +
+ 'params 2: port=8888')
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'meta 3: interface=eth1 ' +
+ 'meta 2: port=8888')
+
+
+def test_new_acls():
+ roundtrip('role fum description=test read description=test2 xpath:"*[@name=karl]"')
+
+
+def test_acls_reftype():
+ roundtrip('role boo deny ref:d0 type:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_acls_oldsyntax():
+ roundtrip('role boo deny ref:d0 tag:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_rules():
+ roundtrip('primitive p1 Dummy params ' +
+ 'rule $role=Started date in start=2009-05-26 end=2010-05-26 ' +
+ 'or date gt 2014-01-01 state=2')
+
+
+def test_new_role():
+ roundtrip('role silly-role-2 read xpath:"//nodes//attributes" ' +
+ 'deny type:nvpair deny ref:d0 deny type:nvpair')
+
+
+def test_topology_1114():
+ roundtrip('fencing_topology attr:rack=1 node1,node2')
+
+
+def test_topology_1114_pattern():
+ roundtrip('fencing_topology pattern:.* network disk')
+
+
+def test_locrule():
+ roundtrip('location loc-testfs-with-eth1 testfs rule ethmonitor-eth1 eq 1')
+
+
+def test_is_value_sane():
+ roundtrip('''primitive p1 Dummy params state="bo'o"''')
+
+
+def test_is_value_sane_2():
+ roundtrip('primitive p1 Dummy params state="bo\\"o"')
+
+
+def test_alerts_1():
+ roundtrip('alert alert1 "/tmp/foo.sh" to "/tmp/bar.log"')
+
+
+def test_alerts_2():
+ roundtrip('alert alert2 "/tmp/foo.sh" attributes foo=bar to "/tmp/bar.log"')
+
+
+def test_alerts_3():
+ roundtrip('alert alert3 "a path here" meta baby to "/tmp/bar.log"')
+
+
+def test_alerts_4():
+ roundtrip('alert alert4 "/also/a/path"')
+
+
+def test_alerts_5():
+ roundtrip('alert alert5 "/a/path" to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_6():
+ roundtrip('alert alert6 "/a/path" select fencing attributes { standby } to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_7():
+ roundtrip('alert alert7 "/a/path" select fencing attributes foo=bar to { "/another/path" } meta timeout=30s')
diff --git a/test/unittests/test_corosync.py b/test/unittests/test_corosync.py
new file mode 100644
index 0000000..2443f36
--- /dev/null
+++ b/test/unittests/test_corosync.py
@@ -0,0 +1,488 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for corosync.py
+
+from builtins import str
+from builtins import object
+import os
+import unittest
+import pytest
+from unittest import mock
+from crmsh import corosync
+from crmsh.corosync import Parser, make_section, make_value
+
+
+def _read_conf(name):
+    with open(os.path.join(os.path.dirname(__file__), name)) as f:
+        return f.read()
+
+F1 = _read_conf('corosync.conf.1')
+F2 = _read_conf('corosync.conf.2')
+F3 = _read_conf('bug-862577_corosync.conf')
+F4 = _read_conf('corosync.conf.3')
+
+
+def _valid(parser):
+    """Assert the parser produced only known token types and balanced sections."""
+    depth = 0
+ for t in parser._tokens:
+ if t.token not in (corosync._tCOMMENT,
+ corosync._tBEGIN,
+ corosync._tEND,
+ corosync._tVALUE):
+ raise AssertionError("illegal token " + str(t))
+ if t.token == corosync._tBEGIN:
+ depth += 1
+ if t.token == corosync._tEND:
+ depth -= 1
+ if depth != 0:
+ raise AssertionError("Unbalanced sections")
+
+
+def _print(parser):
+ print(parser.to_string())
+
+
+def test_query_status_exception():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("test")
+ assert str(err.value) == "Wrong type \"test\" to query status"
+
+
+@mock.patch('crmsh.corosync.query_ring_status')
+def test_query_status(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status_exception(mock_configured):
+ mock_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qdevice_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_configured.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.print_cluster_nodes')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status(mock_configured, mock_run, mock_print):
+ mock_configured.return_value = True
+ corosync.query_qdevice_status()
+ mock_run.assert_called_once_with("corosync-qdevice-tool -sv")
+ mock_print.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_ring_status")
+def test_query_status_ring(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_quorum_status")
+def test_query_status_quorum(mock_quorum_status):
+ corosync.query_status("quorum")
+ mock_quorum_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_qnetd_status")
+def test_query_status_qnetd(mock_qnetd_status):
+ corosync.query_status("qnetd")
+ mock_qnetd_status.assert_called_once_with()
+
+
+def test_query_status_except():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("xxx")
+ assert str(err.value) == "Wrong type \"xxx\" to query status"
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status_except(mock_run):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_ring_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status(mock_run):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_ring_status()
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_except(mock_run, mock_print_nodes):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_quorum_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status(mock_run, mock_print_nodes):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_no_quorum(mock_run, mock_print_nodes):
+ mock_run.return_value = (2, "no quorum", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_qdevice(mock_qdevice_configured):
+ mock_qdevice_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_cluster_name(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.return_value = None
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "cluster_name not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_host(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", None]
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "host for qnetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy_id_failed(mock_qdevice_configured,
+ mock_get_value, mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_parallax_call.side_effect = ValueError("Failed on 10.10.10.123: foo")
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert err.value.args[0] == "Failed on 10.10.10.123: foo"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy(mock_qdevice_configured, mock_get_value,
+ mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes,
+ mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ mock_parallax_call.return_value = [("node1", (0, "data", None)), ]
+
+ corosync.query_qnetd_status()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+ mock_parallax_call.assert_called_once_with(["10.10.10.123"], "corosync-qnetd-tool -lv -c hacluster")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.get_nodeinfo_from_cmaptool')
+@mock.patch('crmsh.corosync.add_node_ucast')
+def test_add_nodelist_from_cmaptool(mock_add_ucast, mock_nodeinfo):
+ mock_nodeinfo.return_value = {'1': ['10.10.10.1', '20.20.20.1'],'2': ['10.10.10.2', '20.20.20.2']}
+
+ corosync.add_nodelist_from_cmaptool()
+
+ mock_nodeinfo.assert_called_once_with()
+ mock_add_ucast.assert_has_calls([
+ mock.call(['10.10.10.1', '20.20.20.1'], '1'),
+ mock.call(['10.10.10.2', '20.20.20.2'], '2')
+ ])
+
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_unicast(mock_get_value):
+ mock_get_value.return_value = "udpu"
+ assert corosync.is_unicast() is True
+ mock_get_value.assert_called_once_with("totem.transport")
+
+
+@mock.patch('crmsh.corosync.get_corosync_value_dict')
+def test_token_and_consensus_timeout(mock_get_dict):
+ mock_get_dict.return_value = {"token": 10, "consensus": 12}
+ assert corosync.token_and_consensus_timeout() == 22
+
+
+@mock.patch('crmsh.corosync.get_corosync_value')
+def test_get_corosync_value_dict(mock_get_value):
+ mock_get_value.side_effect = ["10000", None]
+ res = corosync.get_corosync_value_dict()
+ assert res == {"token": 10, "consensus": 12}
+
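+# Note on the expected numbers: corosync reports token and consensus in
+# milliseconds, so "10000" becomes token=10 (seconds); with no explicit
+# consensus value, the corosync default of 1.2 * token (12 seconds here)
+# is presumably what get_corosync_value_dict() falls back to.
+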
+
+@mock.patch('crmsh.corosync.get_value')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value_raise(mock_run, mock_get_value):
+ mock_run.side_effect = ValueError
+ mock_get_value.return_value = None
+ assert corosync.get_corosync_value("xxx") is None
+ mock_run.assert_called_once_with("corosync-cmapctl xxx")
+ mock_get_value.assert_called_once_with("xxx")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value(mock_run):
+ mock_run.return_value = "totem.token = 10000"
+ assert corosync.get_corosync_value("totem.token") == "10000"
+ mock_run.assert_called_once_with("corosync-cmapctl totem.token")
+
+
+class TestCorosyncParser(unittest.TestCase):
+ def test_parse(self):
+ p = Parser(F1)
+ _valid(p)
+ self.assertEqual(p.get('logging.logfile'), '/var/log/cluster/corosync.log')
+ self.assertEqual(p.get('totem.interface.ttl'), '1')
+ p.set('totem.interface.ttl', '2')
+ _valid(p)
+ self.assertEqual(p.get('totem.interface.ttl'), '2')
+ p.remove('quorum')
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 0)
+ p.add('', make_section('quorum', []))
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 1)
+ p.set('quorum.votequorum', '2')
+ _valid(p)
+ self.assertEqual(p.get('quorum.votequorum'), '2')
+ p.set('bananas', '5')
+ _valid(p)
+ self.assertEqual(p.get('bananas'), '5')
+
+ def test_udpu(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', '10.10.10.10') +
+ make_value('nodelist.node.nodeid', str(corosync.get_free_nodeid(p)))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 6)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1', '2', '3'])
+
+ def test_add_node_no_nodelist(self):
+ "test checks that if there is no nodelist, no node is added"
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F1)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_no_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
+ corosync.find_configured_ip(["10.10.10.2"])
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ mock_search.assert_called_once_with("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
+ with self.assertRaises(corosync.IPAlreadyConfiguredError) as err:
+ corosync.find_configured_ip(["10.10.10.2"])
+ self.assertEqual("IP 10.10.10.2 was already configured", str(err.exception))
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ # For some reason mock_search.assert_called_once_with does not work
+ mock_search.assert_has_calls([mock.call("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")])
+
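+    # test_add_node_ucast below is mock-driven end to end; reading the
+    # assertions, add_node_ucast() is expected to create the nodelist
+    # section when missing, append a node entry with a ring0_addr and a
+    # free nodeid, toggle quorum.two_node for the two-node case (and back
+    # to 0 here, apparently because a qdevice of model "net" is present),
+    # and write the serialized config back to corosync.conf.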
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.get_values")
+ @mock.patch("crmsh.corosync.make_value")
+ @mock.patch("crmsh.corosync.get_free_nodeid")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.utils.read_from_file")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.corosync.find_configured_ip")
+ def test_add_node_ucast(self, mock_find_ip, mock_conf, mock_read_file, mock_parser,
+ mock_free_id, mock_make_value, mock_get_values, mock_make_section, mock_str2file):
+ mock_parser_inst = mock.Mock()
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_parser.return_value = mock_parser_inst
+ mock_free_id.return_value = 2
+ mock_make_value.side_effect = [["value1"], ["value2"]]
+ mock_get_values.return_value = []
+ mock_make_section.side_effect = ["section1", "section2"]
+ mock_parser_inst.count.return_value = 2
+ mock_parser_inst.get.return_value = "net"
+ mock_parser_inst.to_string.return_value = "string data"
+
+ corosync.add_node_ucast(['10.10.10.1'])
+
+ mock_find_ip.assert_called_once_with(['10.10.10.1'])
+ mock_parser.assert_called_once_with("data")
+ mock_free_id.assert_called_once_with(mock_parser_inst)
+ mock_make_value.assert_has_calls([
+ mock.call('nodelist.node.ring0_addr', '10.10.10.1'),
+ mock.call('nodelist.node.nodeid', '2')
+ ])
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_make_section.assert_has_calls([
+ mock.call('nodelist', []),
+ mock.call('nodelist.node', ["value1", "value2"])
+ ])
+ mock_parser_inst.add.assert_has_calls([
+ mock.call('', 'section1'),
+ mock.call('nodelist', 'section2')
+ ])
+ mock_parser_inst.count.assert_called_once_with("nodelist.node")
+ mock_parser_inst.set.assert_has_calls([
+ mock.call('quorum.two_node', '1'),
+ mock.call('quorum.two_node', '0')
+ ])
+ mock_parser_inst.get.assert_called_once_with('quorum.device.model')
+ mock_parser_inst.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ def test_add_node_nodelist(self):
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F2)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ c = p.count('nodelist.node')
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), c + 1)
+ self.assertEqual(get_free_nodeid(p), nid + 1)
+
+ def test_remove_node(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.remove_section_where('nodelist.node', 'nodeid', '2')
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 4)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1'])
+
+ def test_bnc862577(self):
+ p = Parser(F3)
+ _valid(p)
+ self.assertEqual(p.count('service.ver'), 1)
+
+ def test_get_free_nodeid(self):
+ def ids(*lst):
+ class Ids(object):
+ def get_all(self, _arg):
+ return lst
+ return Ids()
+ self.assertEqual(1, corosync.get_free_nodeid(ids('2', '5')))
+ self.assertEqual(3, corosync.get_free_nodeid(ids('1', '2', '5')))
+ self.assertEqual(4, corosync.get_free_nodeid(ids('1', '2', '3')))
+
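+# A minimal sketch (not the real implementation, which lives in
+# crmsh.corosync) of the behaviour the assertions above pin down:
+# the free nodeid is the smallest positive integer not already in use.
+#
+#     def _free_nodeid_sketch(used_ids):
+#         used = {int(i) for i in used_ids}
+#         n = 1
+#         while n in used:
+#             n += 1
+#         return n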
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_crashtest_check.py b/test/unittests/test_crashtest_check.py
new file mode 100644
index 0000000..deb1ca5
--- /dev/null
+++ b/test/unittests/test_crashtest_check.py
@@ -0,0 +1,790 @@
+import os
+import sys
+
+try:
+    from unittest import mock, TestCase
+except ImportError:
+    import mock
+    from unittest import TestCase
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import check, config
+
+
+class TestCheck(TestCase):
+
+ @mock.patch('crmsh.crash_test.check.check_cluster')
+ def test_check(self, mock_cluster_check):
+ ctx = mock.Mock(cluster_check=True)
+ check.check(ctx)
+ mock_cluster_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_firewall')
+ @mock.patch('crmsh.crash_test.check.check_time_service')
+ @mock.patch('crmsh.crash_test.check.check_my_hostname_resolves')
+ def test_check_environment(self, mock_hostname, mock_time, mock_firewall):
+ check.check_environment()
+ mock_hostname.assert_called_once_with()
+ mock_time.assert_called_once_with()
+ mock_firewall.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.check.crmshboot.my_hostname_resolves')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_my_hostname_resolves(self, mock_task_check, mock_hostname, mock_this_node):
+ mock_task_inst = mock.Mock()
+ mock_task_check.return_value = mock_task_inst
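+        # TaskCheck.run() is used as a context manager by the code under
+        # test, so __enter__/__exit__ are stubbed to let `with ...run():`
+        # work on a Mock; the same pattern repeats throughout this class.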
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_hostname.return_value = False
+ mock_this_node.return_value = "node1"
+
+ check.check_my_hostname_resolves()
+
+ mock_task_check.assert_called_once_with("Checking hostname resolvable")
+ mock_hostname.assert_called_once_with()
+ mock_task_inst.error.assert_called_once_with('Hostname "node1" is unresolvable.\n Please add an entry to /etc/hosts or configure DNS.')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_none(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.side_effect = [False, False, False]
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_has_calls([
+ mock.call('chronyd.service'),
+ mock.call('ntp.service'),
+ mock.call('ntpd.service')
+ ])
+ mock_task_inst.warn.assert_called_once_with("No NTP service found.")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_warn(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = False
+ mock_service_active.return_value = False
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_called_once_with("chronyd.service is available")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("chronyd.service is disabled"),
+ mock.call("chronyd.service is not active"),
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = True
+ mock_service_active.return_value = True
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("chronyd.service is available"),
+ mock.call("chronyd.service is enabled"),
+ mock.call("chronyd.service is active")
+ ])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_return(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ mock_run.return_value = (1, None, "error")
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("error")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_fail_to_get_port(self, mock_corosync_port):
+ mock_corosync_port.return_value = []
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("Can not get corosync's port")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ output_cmd = """
+ 1234/udp
+ 4444/tcp
+ """
+ mock_run.return_value = (0, output_cmd, None)
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("UDP port 5678 should open in firewalld")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+ task_inst.info.assert_called_once_with("UDP port 1234 is opened in firewalld")
+
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+    def test_check_firewall_not_installed(self, mock_task, mock_installed):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.side_effect = [False, False]
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_has_calls([
+ mock.call("firewalld"),
+ mock.call("SuSEfirewall2")
+ ])
+ mock_task_inst.warn.assert_called_once_with("Failed to detect firewall")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall_warn(self, mock_task, mock_installed, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = False
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_called_once_with("firewalld.service is available")
+ mock_task_inst.warn.assert_called_once_with("firewalld.service is not active")
+
+ @mock.patch('crmsh.crash_test.check.check_port_open')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall(self, mock_task, mock_installed, mock_active, mock_check_port):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = True
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("firewalld.service is available"),
+ mock.call("firewalld.service is active")
+ ])
+ mock_active.assert_called_once_with("firewalld")
+ mock_check_port.assert_called_once_with(mock_task_inst, "firewalld")
+
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster_return(self, mock_check_cluster):
+ mock_check_cluster.return_value = False
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_resources')
+ @mock.patch('crmsh.crash_test.check.check_nodes')
+ @mock.patch('crmsh.crash_test.check.check_fencing')
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster(self, mock_check_cluster, mock_check_fencing, mock_check_nodes, mock_check_resources):
+ mock_check_cluster.return_value = True
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+ mock_check_fencing.assert_called_once_with()
+ mock_check_nodes.assert_called_once_with()
+ mock_check_resources.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service_pacemaker_disable(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=False)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [False, True]
+ mock_active.side_effect = [True, False]
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, False)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("pacemaker.service is disabled"),
+ mock.call("corosync.service is enabled")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_called_once_with("corosync.service is running")
+ mock_task_inst.error.assert_called_once_with("pacemaker.service is not running!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=True)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [True, True]
+ mock_active.side_effect = [True, True]
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, True)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_has_calls([
+ mock.call("pacemaker.service is enabled"),
+ mock.call("corosync.service is running"),
+ mock.call("pacemaker.service is running")
+ ])
+ mock_task_inst.warn.assert_called_once_with("corosync.service is enabled")
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_stonith(self, mock_task, mock_fence_info):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=False)
+ mock_fence_info.return_value = mock_fence_info_inst
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_task_inst.warn.assert_called_once_with("stonith is disabled")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_resources(self, mock_task, mock_fence_info, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (1, None, None)
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_called_once_with("stonith is enabled")
+ mock_task_inst.warn.assert_called_once_with("No stonith resource configured!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_has_warn(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Stopped (disabled)", None)
+ mock_active.return_value = False
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("stonith resource stonith-sbd(external/sbd) is Stopped"),
+ mock.call("sbd service is not running!")
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Started node2", None)
+ mock_active.return_value = True
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is Started"),
+ mock.call("sbd service is running")
+ ])
+ mock_active.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_error(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_run.return_value = (1, None, "error data")
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.error.assert_called_once_with("run \"crm_mon -1\" error: error data")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: 15sp2-1 (version 2.0.3+20200511.2b248d828-1.10-2.0.3+20200511.2b248d828) - partition with quorum
+ * Last updated: Tue Nov 3 14:09:29 2020
+ * Last change: Tue Nov 3 13:47:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Online: [ 15sp2-1 ]
+ * OFFLINE: [ 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("DC node: 15sp2-1"),
+ mock.call("Cluster have quorum"),
+ mock.call("Online nodes: [ 15sp2-1 ]")
+ ])
+ mock_task_inst.warn.assert_called_once_with("OFFLINE nodes: [ 15sp2-2 ]")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_warn(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: NONE
+ * Last updated: Tue Nov 3 14:16:49 2020
+ * Last change: Tue Nov 3 14:09:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Node 15sp2-1: UNCLEAN (offline)
+ * Node 15sp2-2: UNCLEAN (offline)
+
+Active Resources:
+ * No active resources
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("Cluster lost quorum!"),
+ mock.call("Node 15sp2-1 is UNCLEAN!"),
+ mock.call("Node 15sp2-2 is UNCLEAN!")
+ ])
+
+ @mock.patch('crmsh.crash_test.check.completers.resources_stopped')
+ @mock.patch('crmsh.crash_test.check.completers.resources_started')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_resources(self, mock_task, mock_started, mock_stopped):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_started.return_value = ["r1", "r2"]
+ mock_stopped.return_value = ["r3", "r4"]
+
+ check.check_resources()
+
+ mock_task.assert_called_once_with("Checking resources")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("Started resources: r1,r2"),
+ mock.call("Stopped resources: r3,r4")
+ ])
+
+ # Test fix()
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+    def test_fix_has_candidate(cls, mock_check_sbd, mock_correct_sbd):
+        """
+        Test fix() when check_sbd() returns a valid candidate device
+        """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = dev
+ check.fix(ctx)
+ mock_correct_sbd.assert_called_once_with(ctx, dev)
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+    def test_fix_no_candidate(cls, mock_check_sbd, mock_correct_sbd):
+        """
+        Test fix() when check_sbd() finds no valid candidate
+        """
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = ""
+ mock_correct_sbd.return_value = ""
+ check.fix(ctx)
+ mock_correct_sbd.assert_not_called()
+
+ # Test check_sbd()
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_no_conf(cls, mock_os_path_exists,
+ mock_utils_msg_info, mock_run):
+ """
+ Test no configuration file
+ """
+ mock_os_path_exists.return_value = False
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD configuration file {} not found.".
+ format(config.SBD_CONF), to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_not_configured(cls, mock_os_path_exists, mock_utils_parse_sysconf,
+ mock_utils_msg_info, mock_run):
+ """
+ Test SBD device not configured
+ """
+ mock_os_path_exists.return_value = True
+ mock_utils_parse_sysconf.return_value = {}
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD DEVICE not used.", to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_valid(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_info, mock_is_valid_sbd, mock_run):
+ """
+ Test configured SBD device exist and valid
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
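+        # one boolean per os.path.exists() call (presumably conf file, then device paths)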
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = True
+
+ check.check_sbd()
+ mock_msg_info.assert_called_with("'{}' is a valid SBD device.".format(dev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_but_no_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid and no candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = ""
+
+ check.check_sbd()
+ mock_msg_warn.assert_has_calls(
+ [mock.call("Device '{}' is not valid for SBD, may need initialize.".
+ format(dev), to_stdout=False),
+ mock.call("Fail to find a valid candidate SBD device.",
+ to_stdout=False)])
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_exist_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+        Test configured SBD device does not exist but a candidate is found
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, False]
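+        # conf file exists but the configured device path does not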
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "SBD device '{}' is not exist.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid but has candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "Device '{}' is not valid for SBD, may need initialize.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ # Test correct_sbd()
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_conf(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd with exception
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [False, True]
+        mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Configure file {} not exist!'.
+ format(config.SBD_CONF))
+
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_dev(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd with exception
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, False]
+        mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Device {} not exist!'.format(dev))
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.TaskFixSBD.verify')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('os.remove')
+ @mock.patch('shutil.move')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_move, mock_remove,
+ mock_mkstemp, mock_sbd_verify, mock_open):
+ """
+ Test correct_sbd
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+        mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="SBD_DEVICE={}".format(dev)).return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
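+        # mkstemp() returns (fd, path): first the backup copy, then the edit buffer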
+
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_called_once_with(config.SBD_CONF, bak)
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_move.assert_called_once_with(edit, config.SBD_CONF)
+ mock_remove.assert_called()
+ mock_sbd_verify.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_run_exception(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_mkstemp, mock_msg_error,
+ mock_open):
+ """
+ Test correct_sbd
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+        mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="data2").return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+ mock_copymode.side_effect = Exception('Copy file error!')
+
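+        # assertRaises is reached via cls because this test is a classmethod;
+        # failureException is a class attribute, so passing cls for self works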
+ with cls.assertRaises(cls, crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_has_calls([mock.call(config.SBD_CONF, bak),
+ mock.call(bak, config.SBD_CONF)])
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_msg_error.assert_called_once_with('Fail to modify file {}'.
+ format(config.SBD_CONF))
diff --git a/test/unittests/test_crashtest_main.py b/test/unittests/test_crashtest_main.py
new file mode 100644
index 0000000..02ae7b3
--- /dev/null
+++ b/test/unittests/test_crashtest_main.py
@@ -0,0 +1,215 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+    from unittest import TestCase
+    import mock
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestContext(TestCase):
+
+ def test_context(self):
+ main.ctx.name = "xin"
+ self.assertEqual(main.ctx.name, "xin")
+
+
+class TestMain(TestCase):
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument_help(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.parse_argument(ctx)
+
+ mock_parser_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=False, env_check=True, sbd=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
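+        # parse_argument() is expected to copy the parsed flags onto ctx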
+ main.parse_argument(ctx)
+ self.assertEqual(ctx.env_check, True)
+ self.assertEqual(ctx.sbd, True)
+
+ mock_parser_inst.print_help.assert_not_called()
+
+ def test_setup_basic_context(self):
+ ctx = mock.Mock(process_name="crash_test")
+ main.setup_basic_context(ctx)
+ self.assertEqual(ctx.var_dir, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.report_path, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.jsonfile, "/var/lib/crmsh/crash_test/crash_test.json")
+ self.assertEqual(ctx.logfile, "/var/log/crmsh/crmsh.log")
+
+ @mock.patch('logging.Logger.fatal')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_non_root(self, mock_setup, mock_parse, mock_is_root, mock_log_fatal):
+ mock_is_root.return_value = False
+ ctx = mock.Mock(process_name="crash_test")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_log_fatal.assert_called_once_with("{} can only be executed as user root!".format(ctx.process_name))
+
+ @mock.patch('crmsh.crash_test.main.split_brain')
+ @mock.patch('crmsh.crash_test.main.fence_node')
+ @mock.patch('crmsh.crash_test.main.kill_process')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.makedirs')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run(self, mock_setup, mock_parse, mock_is_root, mock_exists, mock_mkdir,
+ mock_fix, mock_check, mock_kill, mock_fence, mock_sb):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = False
+
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_mkdir.assert_called_once_with(ctx.var_dir, exist_ok=True)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_kill.assert_called_once_with(ctx)
+ mock_fence.assert_called_once_with(ctx)
+ mock_sb.assert_called_once_with(ctx)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_except(self, mock_setup, mock_parse, mock_is_root, mock_exists,
+ mock_fix, mock_check, mock_dumps):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = True
+ mock_check.side_effect = KeyboardInterrupt
+
+ with self.assertRaises(KeyboardInterrupt):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_dumps.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+    def test_kill_process_return_pacemaker_loop(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=True, loop=True, sbd=None, corosync=None)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+    def test_kill_process_return(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=False, sbd=False, corosync=False)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process(self, mock_task_kill):
+ mock_task_kill_inst = mock.Mock()
+ mock_task_kill.return_value = mock_task_kill_inst
+ mock_task_kill_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(sbd=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.kill_process(ctx)
+
+ mock_task_kill_inst.pre_check.assert_called_once_with()
+ mock_task_kill_inst.print_header.assert_called_once_with()
+ mock_task_kill_inst.enable_report.assert_called_once_with()
+ mock_task_kill_inst.run.assert_called_once_with()
+ mock_task_kill_inst.wait.assert_called_once_with()
+ mock_task_kill_inst.error.assert_called_once_with("error data")
+
+ def test_split_brain_return(self):
+ ctx = mock.Mock(sp_iptables=None)
+ main.split_brain(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True, force=False)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.do_block.return_value.__enter__ = mock.Mock()
+ mock_sp_inst.do_block.return_value.__exit__ = mock.Mock()
+
+ main.split_brain(ctx)
+
+ mock_sp.assert_called_once_with(False)
+ mock_sp_inst.pre_check.assert_called_once_with()
+ mock_sp_inst.print_header.assert_called_once_with()
+ mock_sp_inst.do_block.assert_called_once_with()
+ mock_sp_inst.run.assert_called_once_with()
+ mock_sp_inst.wait.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain_exception(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.pre_check.side_effect = task.TaskError("error data")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.split_brain(ctx)
+
+ mock_sp_inst.error.assert_called_once_with("error data")
+
+ def test_fence_node_return(self):
+ ctx = mock.Mock(fence_node=None)
+ main.fence_node(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskFence')
+ def test_fence_node(self, mock_task_fence):
+ mock_task_fence_inst = mock.Mock()
+ mock_task_fence.return_value = mock_task_fence_inst
+ mock_task_fence_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(fence_node=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.fence_node(ctx)
+
+ mock_task_fence_inst.pre_check.assert_called_once_with()
+ mock_task_fence_inst.print_header.assert_called_once_with()
+ mock_task_fence_inst.run.assert_called_once_with()
+ mock_task_fence_inst.wait.assert_called_once_with()
+ mock_task_fence_inst.error.assert_called_once_with("error data")
+
+ @classmethod
+ def test_MyArgParseFormatter(cls):
+ main.MyArgParseFormatter("test")
diff --git a/test/unittests/test_crashtest_task.py b/test/unittests/test_crashtest_task.py
new file mode 100644
index 0000000..3b4c092
--- /dev/null
+++ b/test/unittests/test_crashtest_task.py
@@ -0,0 +1,777 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+    from unittest import TestCase
+    import mock
+from datetime import datetime
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestTaskKill(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(current_case="sbd", loop=False)
+ self.task_kill_inst = task.TaskKill(ctx)
+ ctx2 = mock.Mock(current_case="sbd", loop=True)
+ self.task_kill_inst_loop = task.TaskKill(ctx2)
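+        # two instances: a single-shot kill and one with looping kill enabled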
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('os.path.isdir')
+ def test_enable_report_error(self, mock_isdir):
+ mock_isdir.return_value = False
+ main.ctx = mock.Mock(report_path="/path")
+ with self.assertRaises(task.TaskError) as error:
+ self.task_kill_inst.enable_report()
+ self.assertEqual("/path is not a directory", str(error.exception))
+ mock_isdir.assert_called_once_with("/path")
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report_looping(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst_loop.enable_report()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst.enable_report()
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Force kill sbd
+Looping Kill: False
+Expected State: a) sbd process restarted
+ b) Or, this node fenced.
+"""
+ res = self.task_kill_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_kill_inst.build_base_result = mock.Mock()
+ self.task_kill_inst.result = {}
+ self.task_kill_inst.prev_task_list = []
+ self.task_kill_inst.to_json()
+ self.task_kill_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_to_report_return(self):
+ self.task_kill_inst.report = False
+ self.task_kill_inst.to_report()
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('crmsh.crash_test.task.TaskKill.header')
+ def test_to_report(self, mock_header, mock_open_file, mock_fsync):
+ mock_header.return_value = "#### header"
+ self.task_kill_inst.report = True
+ self.task_kill_inst.messages = [["info", "data", "2021"]]
+ self.task_kill_inst.explain = "explain"
+ self.task_kill_inst.report_file = "report_file1"
+ file_handle = mock_open_file.return_value.__enter__.return_value
+
+ self.task_kill_inst.to_report()
+
+ file_handle.write.assert_has_calls([
+ mock.call("#### header"),
+ mock.call("\nLog:\n"),
+ mock.call("2021 INFO:data\n"),
+ mock.call("\nTestcase Explained:\n"),
+ mock.call("explain\n")
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check(self, mock_pre_check, mock_status):
+ mock_status.return_value = (False, 100)
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.pre_check()
+ self.assertEqual("Process sbd is not running!", str(err.exception))
+ mock_pre_check.assert_called_once_with()
+ mock_status.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.crash_test.task.TaskKill.process_monitor')
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_run(self, mock_status, mock_info, mock_run, mock_thread, mock_fence_monitor, mock_process_monitor):
+ mock_status.side_effect = [(False, -1), (True, 100)]
+ mock_thread_fence_inst = mock.Mock()
+ mock_thread_restart_inst = mock.Mock()
+ mock_thread.side_effect = [mock_thread_fence_inst, mock_thread_restart_inst]
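+        # run() should start two monitor threads: one for the fence action,
+        # one watching for the killed process to restart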
+
+ self.task_kill_inst.run()
+
+ mock_status.assert_has_calls([mock.call("sbd"), mock.call("sbd")])
+ mock_info.assert_has_calls([
+ mock.call('Process sbd(100) is running...'),
+ mock.call('Trying to run "killall -9 sbd"')
+ ])
+ mock_run.assert_called_once_with("killall -9 sbd")
+ mock_thread.assert_has_calls([
+ mock.call(target=mock_fence_monitor),
+ mock.call(target=mock_process_monitor),
+ ])
+ mock_thread_fence_inst.start.assert_called_once_with()
+ mock_thread_restart_inst.start.assert_called_once_with()
+
+ def test_wait_exception(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = False
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.wait()
+ self.assertEqual("Process sbd is not restarted!", str(err.exception))
+
+ def test_wait(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = True
+
+ self.task_kill_inst.wait()
+
+ self.task_kill_inst.thread_stop_event.set.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_process_monitor(self, mock_status, mock_info, mock_sleep):
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event.is_set.side_effect = [False, False]
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ mock_status.side_effect = [(False, -1), (True, 100)]
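+        # first poll: process still gone; second poll: restarted with pid 100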
+
+ self.task_kill_inst.process_monitor()
+
+ self.task_kill_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_status.assert_has_calls([
+ mock.call("sbd"),
+ mock.call("sbd")
+ ])
+ mock_info.assert_called_once_with("Process sbd(100) is restarted!")
+ self.task_kill_inst.restart_happen_event.set.assert_called_once_with()
+ mock_sleep.assert_called_once_with(1)
+
+
+class TestTaskCheck(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_msg_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list=[{"process_name": "xin", "age": 38}])
+ self.task_check_inst = task.TaskCheck("task check job1", quiet=False)
+ self.task_check_inst_quiet = task.TaskCheck("task check job1", quiet=True)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.MyLoggingFormatter')
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_to_stdout(self, mock_manage_handler, mock_get_handler, mock_myformatter):
+ mock_manage_handler.return_value.__enter__ = mock.Mock()
+ mock_manage_handler.return_value.__exit__ = mock.Mock()
+
+ task.logger = mock.Mock()
+ task.logger.info = mock.Mock()
+ task.logger.log = mock.Mock()
+
+ get_handler_inst1 = mock.Mock()
+ get_handler_inst1.setFormatter = mock.Mock()
+ get_handler_inst2 = mock.Mock()
+ get_handler_inst2.setFormatter = mock.Mock()
+ mock_get_handler.side_effect = [get_handler_inst1, get_handler_inst2]
+
+ myformatter_inst1 = mock.Mock()
+ myformatter_inst2 = mock.Mock()
+ mock_myformatter.side_effect = [myformatter_inst1, myformatter_inst2]
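+        # one formatter per handler swap: flush=False while buffered messages
+        # are replayed, then the default formatter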
+
+ self.task_check_inst.messages = [("info", "info message"), ("warn", "warn message")]
+ utils.CGREEN = ""
+ utils.CEND = ""
+ utils.CRED = ""
+
+ self.task_check_inst.to_stdout()
+
+ mock_manage_handler.assert_called_once_with("file", keep=False)
+ mock_get_handler.assert_has_calls([
+ mock.call(task.logger, "stream"),
+ mock.call(task.logger, "stream")
+ ])
+ get_handler_inst1.setFormatter.assert_called_once_with(myformatter_inst1)
+ get_handler_inst2.setFormatter.assert_called_once_with(myformatter_inst2)
+ mock_myformatter.assert_has_calls([
+ mock.call(flush=False),
+ mock.call()
+ ])
+ task.logger.info.assert_called_once_with('task check job1 [Pass]', extra={'timestamp': '[2019/07/10 01:15:15]'})
+ task.logger.log.assert_has_calls([
+            mock.call(20, 'info message', extra={'timestamp': ' '}),
+ mock.call(30, 'warn message', extra={'timestamp': ' '})
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_check_inst.build_base_result = mock.Mock()
+ self.task_check_inst.result = {}
+ self.task_check_inst.to_json()
+ self.task_check_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_print_result(self):
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.to_json = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_called_once_with()
+ self.task_check_inst.to_json.assert_called_once_with()
+
+ def test_print_result_quiet(self):
+ self.task_check_inst.quiet = True
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_not_called()
+
+ def test_run(self):
+ self.task_check_inst.print_result = mock.Mock()
+ with self.task_check_inst.run():
+ pass
+ self.task_check_inst.print_result.assert_called_once_with()
+
+
+class TestTaskSplitBrain(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ self.task_sp_inst = task.TaskSplitBrain()
+ self.task_sp_inst.fence_action = "reboot"
+ self.task_sp_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Simulate split brain by blocking traffic between cluster nodes
+Expected Result: One of nodes get fenced
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_sp_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_sp_inst.result = {}
+ self.task_sp_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_online_nodes):
+ mock_run.return_value = (0, None, None)
+ mock_online_nodes.return_value = ["node1"]
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("At least two nodes online!", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_online_nodes.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.peer_node_list')
+ def test_do_block_iptables(self, mock_peer_list, mock_info, mock_get_iplist, mock_run):
+ mock_peer_list.return_value = ["node1", "node2"]
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
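+        # each peer resolves to two ring addresses; all four get an iptables
+        # 'I' (insert) block rule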
+ self.task_sp_inst.do_block_iptables()
+ mock_peer_list.assert_called_once_with()
+ mock_info.assert_has_calls([
+ mock.call("Trying to temporarily block node1 communication ip"),
+ mock.call("Trying to temporarily block node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain.un_block_iptables')
+ def test_un_block(self, mock_unblock_iptables):
+ self.task_sp_inst.un_block()
+ mock_unblock_iptables.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_un_block_iptables(self, mock_info, mock_get_iplist, mock_run):
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
+ self.task_sp_inst.peer_nodelist = ["node1", "node2"]
+ self.task_sp_inst.un_block_iptables()
+ mock_info.assert_has_calls([
+ mock.call("Trying to recover node1 communication ip"),
+ mock.call("Trying to recover node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ def test_run(self, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_sp_inst.run()
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ def test_wait(self):
+ self.task_sp_inst.fence_finish_event = mock.Mock()
+ self.task_sp_inst.fence_finish_event.wait.return_value = False
+ self.task_sp_inst.thread_stop_event = mock.Mock()
+ self.task_sp_inst.wait()
+ self.task_sp_inst.fence_finish_event.wait.assert_called_once_with(60)
+ self.task_sp_inst.thread_stop_event.set.assert_called_once_with()
+
+
+class TestFence(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(fence_node="node1", yes=False)
+ self.task_fence_inst = task.TaskFence(ctx)
+ self.task_fence_inst.fence_action = "reboot"
+ self.task_fence_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Fence node node1
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_fence_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_fence_inst.result = {}
+ self.task_fence_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which crm_node")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.check_node_status')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_node_status):
+ mock_run.side_effect = [(0, None, None), (0, None, None), (0, None, None)]
+ mock_node_status.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("Node \"node1\" not in cluster!", str(err.exception))
+ mock_run.assert_has_calls([
+ mock.call("which crm_node"),
+ mock.call("which stonith_admin"),
+ mock.call("which crm_attribute")
+ ])
+ mock_node_status.assert_called_once_with("node1", "member")
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_run(self, mock_info, mock_run, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_fence_inst.run()
+ mock_info.assert_called_once_with("Trying to fence node \"node1\"")
+ mock_run.assert_called_once_with("crm_attribute -t status -N 'node1' -n terminate -v true")
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait_this_node(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node1"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = True
+
+ self.task_fence_inst.wait()
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for self reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node2"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = None
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.wait()
+ self.assertEqual("Target fence node \"node1\" still alive", str(err.exception))
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for node \"node1\" reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+
+class TestTask(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list={"process_name": "xin", "age": 38})
+ self.task_inst = task.Task("task description", flush=True)
+ mock_now.assert_called_once_with()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ self.task_inst.header()
+
+ def test_to_report(self):
+ self.task_inst.to_report()
+
+ def test_to_json(self):
+ self.task_inst.to_json()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception(self, mock_active):
+ mock_active.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Cluster not running!", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception_no_fence(self, mock_active):
+ mock_active.return_value = True
+ self.task_inst.get_fence_info = mock.Mock()
+ self.task_inst.fence_enabled = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Require stonith enabled", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+ self.task_inst.get_fence_info.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ def test_get_fence_info(self, mock_fence_info):
+ mock_fence_info_inst = mock.Mock()
+ mock_fence_info.return_value = mock_fence_info_inst
+ self.task_inst.get_fence_info()
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_info(self, mock_info):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.info("info message")
+ self.task_inst.msg_append.assert_called_once_with("info", "info message")
+ mock_info.assert_called_once_with("info message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ def test_warn(self, mock_warn):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.warn("warn message")
+ self.task_inst.msg_append.assert_called_once_with("warn", "warn message")
+ mock_warn.assert_called_once_with("warn message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ def test_error(self, mock_error):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.error("error message")
+ self.task_inst.msg_append.assert_called_once_with("error", "error message")
+ mock_error.assert_called_once_with("error message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.now')
+ def test_msg_append(self, mock_now):
+ self.task_inst.to_json = mock.Mock()
+ self.task_inst.to_report = mock.Mock()
+ self.task_inst.msg_append("error", "warn message")
+ mock_now.assert_called_once_with()
+ self.task_inst.to_json.assert_called_once_with()
+ self.task_inst.to_report.assert_called_once_with()
+
+ def test_build_base_result(self):
+ self.task_inst.build_base_result()
+ expected_result = {
+ "Timestamp": self.task_inst.timestamp,
+ "Description": self.task_inst.description,
+ "Messages": []
+ }
+ self.assertDictEqual(expected_result, self.task_inst.result)
+
+ @mock.patch('crmsh.crash_test.utils.warning_ask')
+ def test_print_header(self, mock_ask):
+ self.task_inst.header = mock.Mock()
+ self.task_inst.info = mock.Mock()
+ mock_ask.return_value = False
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ self.task_inst.print_header()
+
+ self.task_inst.header.assert_called_once_with()
+ mock_ask.assert_called_once_with(task.Task.REBOOT_WARNING)
+ self.task_inst.info.assert_called_once_with("Testcase cancelled")
+
+ @mock.patch('crmsh.crash_test.utils.str_to_datetime')
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_fence_action_monitor(self, mock_run, mock_info, mock_sleep, mock_datetime):
+ self.task_inst.thread_stop_event = mock.Mock()
+ self.task_inst.thread_stop_event.is_set.side_effect = [False, False, False, False]
+ self.task_inst.fence_start_event = mock.Mock()
+ self.task_inst.fence_finish_event = mock.Mock()
+ output = "Pending Fencing Actions:\n * reboot of 15sp2-2 pending: client=pacemaker-controld.2430, origin=15sp2-1"
+ output2 = "Node 15sp2-2 last fenced at: Tue Jan 19 16:08:37 2021"
+ mock_run.side_effect = [(1, None, None), (0, output, None), (1, None, None), (0, output2, None)]
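+        # polling sequence: no pending action, pending reboot of 15sp2-2,
+        # empty fence history, then the completed fence record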
+ self.task_inst.timestamp = "2021/01/19 16:08:24"
+ mock_datetime.side_effect = [
+ datetime.strptime(self.task_inst.timestamp, '%Y/%m/%d %H:%M:%S'),
+ datetime.strptime("Tue Jan 19 16:08:37 2021", '%a %b %d %H:%M:%S %Y')
+ ]
+
+ self.task_inst.fence_action_monitor()
+
+ self.task_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call(),
+ mock.call(),
+ mock.call()
+ ])
+ mock_run.assert_has_calls([
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2")),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2"))
+ ])
+ mock_info.assert_has_calls([
+ mock.call("Node \"15sp2-2\" will be fenced by \"15sp2-1\"!"),
+ mock.call("Node \"15sp2-2\" was successfully fenced by \"15sp2-1\"")
+ ])
+ self.task_inst.fence_start_event.set.assert_called_once_with()
+ self.task_inst.fence_finish_event.set.assert_called_once_with()
+
+
+
+class TestFixSBD(TestCase):
+ """
+ Class to test TaskFixSBD of task.py
+ All tested in test_crash_test.py except verify()
+ """
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info, mock_mkstemp, mock_isfile, mock_open):
+ """
+ Test setUp.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+
+ self.task_fixsbd = task.TaskFixSBD(dev, force=False)
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+ pass
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_verify_succeed(self, mock_msg_info, mock_isfile, mock_open, mock_fsync):
+ """
+ Test verify successful.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ mock_msg_info.assert_called_once_with('SBD DEVICE change succeed',
+ to_stdout=True)
+ mock_fsync.assert_called()
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ def test_verify_fail(self, mock_isfile, mock_open):
+ """
+ Test verify failed.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ dev_cur = "/dev/disk/by-id/scsi-SATA_ST2000LM007-no_change"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev_cur)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ self.assertEqual("Fail to replace SBD device {} in {}!".
+ format(dev, config.SBD_CONF), str(err.exception))
diff --git a/test/unittests/test_crashtest_utils.py b/test/unittests/test_crashtest_utils.py
new file mode 100644
index 0000000..f8a579b
--- /dev/null
+++ b/test/unittests/test_crashtest_utils.py
@@ -0,0 +1,540 @@
+import os
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+    from unittest import TestCase
+    import mock
+import logging
+
+from crmsh.crash_test import utils, main, config
+
+
+class TestMyLoggingFormatter(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+        self.formatter_inst = utils.MyLoggingFormatter()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+
+class TestFenceInfo(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.fence_info_inst = utils.FenceInfo()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_false(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, False)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_true(self, mock_get_property):
+ mock_get_property.return_value = "True"
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, True)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action_none(self, mock_get_property, mock_error):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, None)
+ mock_get_property.assert_called_once_with("stonith-action")
+ mock_error.assert_called_once_with('Cluster property "stonith-action" should be reboot|off|poweroff')
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action(self, mock_get_property):
+ mock_get_property.return_value = "reboot"
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, "reboot")
+ mock_get_property.assert_called_once_with("stonith-action")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout(self, mock_get_property):
+ mock_get_property.return_value = "60s"
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, "60")
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout_default(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, config.FENCE_TIMEOUT)
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+
+class TestUtils(TestCase):
+ '''
+    Unit tests for crash_test/utils.py
+ '''
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_now(self, mock_datetime):
+ mock_now = mock.Mock()
+ mock_datetime.now.return_value = mock_now
+ mock_now.strftime.return_value = "2019/07/05 14:44:55"
+
+ result = utils.now()
+
+ self.assertEqual(result, "2019/07/05 14:44:55")
+ mock_datetime.now.assert_called_once_with()
+ mock_now.strftime.assert_called_once_with("%Y/%m/%d %H:%M:%S")
+
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ def test_manage_handler(self, mock_get_handler):
+ mock_get_handler.return_value = "handler"
+ utils.logger = mock.Mock()
+ utils.logger.removeHandler = mock.Mock()
+ utils.logger.addHandler = mock.Mock()
+
+ with utils.manage_handler("type1", keep=False):
+ pass
+
+ mock_get_handler.assert_called_once_with(utils.logger, "type1")
+ utils.logger.removeHandler.assert_called_once_with("handler")
+ utils.logger.addHandler.assert_called_once_with("handler")
+
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_msg_raw(self, mock_handler):
+ utils.logger = mock.Mock()
+ utils.logger.log = mock.Mock()
+ utils.msg_raw("level1", "msg1")
+ mock_handler.assert_called_once_with("console", True)
+ utils.logger.log.assert_called_once_with("level1", "msg1")
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_info(self, mock_raw):
+ utils.msg_info("msg1")
+ mock_raw.assert_called_once_with(logging.INFO, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_warn(self, mock_raw):
+ utils.msg_warn("msg1")
+ mock_raw.assert_called_once_with(logging.WARNING, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_error(self, mock_raw):
+ utils.msg_error("msg1")
+ mock_raw.assert_called_once_with(logging.ERROR, "msg1", True)
+
+ @mock.patch('os.fsync')
+ @mock.patch('json.dumps')
+ @mock.patch('builtins.open', create=True)
+ def test_json_dumps(self, mock_open_file, mock_dumps, mock_fsync):
+ main.ctx = mock.Mock(jsonfile="file1", task_list={"process_name": "xin", "age": 38})
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_dumps.return_value = "data"
+
+ utils.json_dumps()
+
+ mock_open_file.assert_called_once_with("file1", "w")
+ mock_dumps.assert_called_once_with(main.ctx.task_list, indent=2)
+ file_handle.write.assert_called_once_with("data")
+ file_handle.flush.assert_called_once_with()
+ mock_fsync.assert_called_once_with(file_handle)
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.this_node')
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node_false(self, mock_run, mock_error, mock_this_node):
+ mock_run.return_value = (1, None, "error data")
+ mock_this_node.return_value = "node1"
+
+ res = utils.this_node()
+ self.assertEqual(res, "node1")
+
+ mock_run.assert_called_once_with("crm_node --name")
+ mock_error.assert_called_once_with("error data")
+ mock_this_node.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.this_node()
+ self.assertEqual(res, "data")
+ mock_run.assert_called_once_with("crm_node --name")
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_str_to_datetime(self, mock_datetime):
+ utils.str_to_datetime("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+ mock_datetime.strptime.assert_called_once_with("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_corosync_port_list(self, mock_run):
+ output = """
+totem.interface.0.bindnetaddr (str) = 10.10.10.121
+totem.interface.0.mcastaddr (str) = 239.101.40.63
+totem.interface.0.mcastport (u16) = 5405
+totem.interface.0.ttl (u8) = 1
+totem.interface.1.bindnetaddr (str) = 20.20.20.121
+totem.interface.1.mcastaddr (str) = 239.6.213.31
+totem.interface.1.mcastport (u16) = 5407
+totem.interface.1.ttl (u8) = 1
+ """
+ mock_run.return_value = (0, output, None)
+ result = utils.corosync_port_list()
+ expected = ['5405', '5407']
+ self.assertListEqual(result, expected)
+ mock_run.assert_called_once_with("corosync-cmapctl totem.interface")
+
+ def test_get_handler(self):
+ mock_handler1 = mock.Mock(_name="test1_handler")
+ mock_handler2 = mock.Mock(_name="test2_handler")
+ mock_logger = mock.Mock(handlers=[mock_handler1, mock_handler2])
+ res = utils.get_handler(mock_logger, "test1_handler")
+ self.assertEqual(res, mock_handler1)
+
+ @mock.patch('os.getuid')
+ def test_is_root(self, mock_getuid):
+ mock_getuid.return_value = 0
+ self.assertEqual(utils.is_root(), True)
+ mock_getuid.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status_false(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
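+ # Walk the mocked /proc: the non-numeric entry 'none' is skipped and
+ # neither cmdline basename matches 'sbd', so (False, -1) is expected.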
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/cmd2\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/cmd2\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "cmd2"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, False)
+ self.assertEqual(pid, -1)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/cmd2\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
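+ # Same /proc walk, but the second entry's cmdline basename is 'sbd',
+ # so the matching pid 2 is returned.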
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/sbd\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/sbd\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "sbd"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, True)
+ self.assertEqual(pid, 2)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/sbd\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status_error_cmd(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ res = utils.check_node_status("node1", "member")
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status(self, mock_run, mock_error):
+ output = """
+1084783297 15sp2-1 member
+1084783193 15sp2-2 lost
+ """
+ mock_run.return_value = (0, output, None)
+
+ res = utils.check_node_status("15sp2-2", "member")
+ self.assertEqual(res, False)
+ res = utils.check_node_status("15sp2-1", "member")
+ self.assertEqual(res, True)
+
+ mock_run.assert_has_calls([
+ mock.call("crm_node -l"),
+ mock.call("crm_node -l")
+ ])
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes_empty(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.online_nodes()
+ self.assertEqual(res, [])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes(self, mock_run):
+ output = """
+Node List:
+ * Online: [ 15sp2-1 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+ res = utils.online_nodes()
+ self.assertEqual(res, ["15sp2-1", "15sp2-2"])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list_empty(self, mock_online):
+ mock_online.return_value = None
+ res = utils.peer_node_list()
+ self.assertEqual(res, [])
+ mock_online.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list(self, mock_online, mock_this_node):
+ mock_online.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.peer_node_list()
+ self.assertEqual(res, ["node2"])
+ mock_online.assert_called_once_with()
+
+ # Test is_valid_sbd():
+ @classmethod
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_exist(cls, mock_os_path_exists):
+ """
+ Test device does not exist
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = False
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_cmd_error(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test the sbd header check command fails
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (-1, None, "Unknown error!")
+ mock_msg_err.return_value = ""
+
+ res = utils.is_valid_sbd(dev)
+ mock_msg_err.assert_called_once_with("Unknown error!")
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test device is not SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ err_output = """
+==Dumping header on disk {}
+==Header on disk {} NOT dumped
+sbd failed; please check the logs.
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (1, "==Dumping header on disk {}".format(dev),
+ err_output)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+ mock_msg_err.assert_called_once_with(err_output)
+
+ @classmethod
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_is_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header):
+ """
+ Test device is a valid SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ std_output = """
+==Dumping header on disk {}
+Header version : 2.1
+UUID : f4c99362-6522-46fc-8ce4-7db60aff19bb
+Number of slots : 255
+Sector size : 512
+Timeout (watchdog) : 5
+Timeout (allocate) : 2
+Timeout (loop) : 1
+Timeout (msgwait) : 10
+==Header on disk {} is dumped
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (0, std_output, None)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is True
+
+ # Test find_candidate_sbd() and _find_match_count()
+ @classmethod
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_dev(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob):
+ """
+ Test no suitable device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = []
+
+ res = utils.find_candidate_sbd("/not-exist-folder/not-exist-dev")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_can(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test no valid candidate device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_DE_devC",
+ "/dev/disk/by-id/scsi-label_DE_devD"]
+ mock_is_valid_sbd.side_effect = [False, False, False, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_has_multi(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test multiple valid candidate devices; the valid device whose name best matches the original should be chosen
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_CN_devC",
+ "/dev/disk/by-id/scsi-label_CN_devD",
+ "/dev/disk/by-id/scsi-mp_China_devE",
+ "/dev/disk/by-id/scsi-mp_China_devF"]
+ mock_is_valid_sbd.side_effect = [True, False, False, True, True, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == "/dev/disk/by-id/scsi-label_CN_devD"
diff --git a/test/unittests/test_gv.py b/test/unittests/test_gv.py
new file mode 100644
index 0000000..fda7272
--- /dev/null
+++ b/test/unittests/test_gv.py
@@ -0,0 +1,36 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+import re
+
+from crmsh import crm_gv
+from crmsh import cibconfig
+
+
+def test_digits_ident():
+ g = crm_gv.gv_types["dot"]()
+ cibconfig.set_graph_attrs(g, ".")
+
+ g.new_node("1a", top_node=True)
+ g.new_attr("1a", 'label', "1a")
+ g.new_node("a", top_node=True)
+ g.new_attr("a", 'label', "a")
+
+ expected = [
+ 'fontname="Helvetica";',
+ 'fontsize="11";',
+ 'compound="true";',
+ '"1a" [label="1a"];',
+ 'a [label="a"];',
+ ]
+ out = '\n'.join(g.repr()).replace('\t', '')
+
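+ # Strip the 'digraph G { ... }' wrapper and check that every emitted
+ # line appears exactly once in the expected set.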
+ for line in re.match(
+ r'^digraph G {\n\n(?P<expected>.*)\n}$', out, re.M | re.S
+ ).group('expected').split('\n'):
+ assert line in expected
+ expected.remove(line)
+
+ assert len(expected) == 0
diff --git a/test/unittests/test_handles.py b/test/unittests/test_handles.py
new file mode 100644
index 0000000..54cd634
--- /dev/null
+++ b/test/unittests/test_handles.py
@@ -0,0 +1,166 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import handles
+
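+# handles implements a small mustache-style template language. As these
+# tests exercise it:
+#   {{key}}              substitutes a value ("" when missing),
+#   {{key:sub}}          drills into a nested dict,
+#   {{#key}}...{{/key}}  is a section (conditional, or iterated for lists),
+#   {{^key}}...{{/key}}  is an inverted section rendered when key is falsy.
+# e.g. handles.parse("{{foo}}", {'foo': 'hello'}) returns "hello".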
+
+def test_basic():
+ t = """{{foo}}"""
+ assert "hello" == handles.parse(t, {'foo': 'hello'})
+ t = """{{foo:bar}}"""
+ assert "hello" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{wiz}}"""
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{foo}}.{{wiz}}"""
+ assert "a.b" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+ t = """Here's a line of text
+ followed by another line
+ followed by some {{foo}}.{{wiz}}
+ and then some at the end"""
+ assert """Here's a line of text
+ followed by another line
+ followed by some a.b
+ and then some at the end""" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+
+
+def test_weird_chars():
+ t = "{{foo#_bar}}"
+ assert "hello" == handles.parse(t, {'foo#_bar': 'hello'})
+ t = "{{_foo$bar_}}"
+ assert "hello" == handles.parse(t, {'_foo$bar_': 'hello'})
+
+
+def test_conditional():
+ t = """{{#foo}}before{{foo:bar}}after{{/foo}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ assert "" == handles.parse(t, {'faa': {'bar': 'hello'}})
+
+ t = """{{#cond}}before{{foo:bar}}after{{/cond}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': True})
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': False})
+
+
+def test_iteration():
+ t = """{{#foo}}!{{foo:bar}}!{{/foo}}"""
+ assert "!hello!!there!" == handles.parse(t, {'foo': [{'bar': 'hello'}, {'bar': 'there'}]})
+
+
+def test_result():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_result2():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+{{#obj}}
+{{obj}}
+{{/obj}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+primitive d0 Dummy
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_mustache():
+ t = """Hello {{name}}
+You have just won {{value}} dollars!
+{{#in_ca}}
+Well, {{taxed_value}} dollars, after taxes.
+{{/in_ca}}
+"""
+ v = {
+ "name": "Chris",
+ "value": 10000,
+ "taxed_value": 10000 - (10000 * 0.4),
+ "in_ca": True
+ }
+
+ assert """Hello Chris
+You have just won 10000 dollars!
+Well, 6000.0 dollars, after taxes.
+""" == handles.parse(t, v)
+
+
+def test_invert():
+ t = """{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """
+No repos :(
+""" == handles.parse(t, v)
+
+
+def test_invert_2():
+ t = """foo
+{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+bar
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """foo
+No repos :(
+bar
+""" == handles.parse(t, v)
+
+
+def test_cib():
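+ # Full template expansion: {{name}} inserts the text recorded via
+ # handles.value() and {{name:id}} resolves to the id stored with it.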
+ t = """{{filesystem}}
+{{exportfs}}
+{{rootfs}}
+{{virtual-ip}}
+clone c-{{rootfs:id}} {{rootfs:id}}
+group g-nfs
+ {{exportfs:id}}
+ {{virtual-ip:id}}
+order base-then-nfs inf: {{filesystem:id}} g-nfs
+colocation nfs-with-base inf: g-nfs {{filesystem:id}}
+order rootfs-before-nfs inf: c-{{rootfs:id}} g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-{{rootfs:id}}
+"""
+ r = """primitive fs1 Filesystem
+primitive efs exportfs
+primitive rfs rootfs
+primitive vip IPaddr2
+ params ip=192.168.0.2
+clone c-rfs rfs
+group g-nfs
+ efs
+ vip
+order base-then-nfs inf: fs1 g-nfs
+colocation nfs-with-base inf: g-nfs fs1
+order rootfs-before-nfs inf: c-rfs g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-rfs
+"""
+ v = {
+ 'filesystem': handles.value({'id': 'fs1'}, 'primitive fs1 Filesystem'),
+ 'exportfs': handles.value({'id': 'efs'}, 'primitive efs exportfs'),
+ 'rootfs': handles.value({'id': 'rfs'}, 'primitive rfs rootfs'),
+ 'virtual-ip': handles.value({'id': 'vip'},
+ 'primitive vip IPaddr2\n params ip=192.168.0.2'),
+ }
+ assert r == handles.parse(t, v)
diff --git a/test/unittests/test_lock.py b/test/unittests/test_lock.py
new file mode 100644
index 0000000..a8dc982
--- /dev/null
+++ b/test/unittests/test_lock.py
@@ -0,0 +1,271 @@
+"""
+Unit tests for crmsh/lock.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2020-12-18
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import lock, config
+
+
+class TestLock(unittest.TestCase):
+ """
+ Unit tests for crmsh.lock.Lock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.local_inst = lock.Lock()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, "output data", None)
+ rc, out, err = self.local_inst._run("test_cmd")
+ mock_run.assert_called_once_with("test_cmd")
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir_false(self, mock_run):
+ mock_run.return_value = (1, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, False)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, True)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ def test_lock_or_fail(self, mock_create):
+ mock_create.return_value = False
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.local_inst._lock_or_fail()
+ self.assertEqual("Failed to claim lock (the lock directory exists at {})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+ mock_create.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_unlock(self, mock_run):
+ self.local_inst.lock_owner = True
+ self.local_inst._unlock()
+ mock_run.assert_called_once_with("rm -rf {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock_exception(self, mock_lock, mock_unlock):
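+ # Even when claiming the lock raises, the lock() context manager must
+ # still call _unlock on exit.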
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.local_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.local_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+
+class TestRemoteLock(unittest.TestCase):
+ """
+ Unit tests for crmsh.lock.RemoteLock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.lock_inst = lock.RemoteLock("node1")
+ self.lock_inst_no_wait = lock.RemoteLock("node1", wait=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run_ssh_error(self, mock_run):
+ mock_run.return_value = (255, None, "ssh error")
+ with self.assertRaises(lock.SSHError) as err:
+ self.lock_inst._run("cmd")
+ self.assertEqual("ssh error", str(err.exception))
+ mock_run.assert_called_once_with("node1", "cmd")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.lock_inst._run("cmd")
+ self.assertEqual(res, mock_run.return_value)
+ mock_run.assert_called_once_with("node1", "cmd")
+
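+ # core.lock_timeout must parse as a number and be at least 120 seconds;
+ # the three tests below cover bad format, too-small value and success.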
+ def test_lock_timeout_error_format(self):
+ config.core.lock_timeout = "pwd"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Invalid format of core.lock_timeout(should be a number)", str(err.exception))
+
+ def test_lock_timeout_min_error(self):
+ config.core.lock_timeout = "12"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Minimum value of core.lock_timeout should be 120", str(err.exception))
+
+ def test_lock_timeout(self):
+ config.core.lock_timeout = "130"
+ self.assertEqual(self.lock_inst.lock_timeout, 130)
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist_error(self, mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst._get_online_nodelist()
+ self.assertEqual("error data", str(err.exception))
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist(self, mock_run):
+ output = """
+ 1084783297 15sp2-1 member
+ 1084783193 15sp2-2 lost
+ 1084783331 15sp2-3 member
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.lock_inst._get_online_nodelist()
+ self.assertEqual(res, ["15sp2-1", "15sp2-3"])
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_break(self, mock_time, mock_time_out, mock_create):
+ mock_time.return_value = 10000
+ mock_time_out.return_value = 120
+ mock_create.return_value = True
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_called_once_with()
+ mock_time_out.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_timed_out(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
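+ # time.time() returns 10000 then 10121, stepping past the 120s timeout,
+ # so the wait gives up with ClaimLockError after a single sleep.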
+ mock_time.side_effect = [10000, 10121]
+ mock_time_out.return_value = 120
+ mock_create.return_value = False
+ mock_get_nodelist.return_value = ["node2"]
+
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.lock_inst._lock_or_wait()
+ self.assertEqual("Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (node1:{})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+
+ mock_time.assert_has_calls([mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_called_once_with()
+ mock_get_nodelist.assert_called_once_with()
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_called_once_with(10)
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_again(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
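+ # Two failed attempts (lock dir exists, holder still online), then the
+ # third _create_lock_dir succeeds and the wait loop exits cleanly.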
+ mock_time.side_effect = [10000, 10010, 10020]
+ mock_time_out.side_effect = [120, 120, 120]
+ mock_create.side_effect = [False, False, True]
+ mock_get_nodelist.side_effect = [["node1"], ["node1", "node2"]]
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_get_nodelist.assert_has_calls([mock.call(), mock.call()])
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock_exception(self, mock_lock, mock_unlock):
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.lock_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.lock_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_fail')
+ def test_lock_no_wait(self, mock_lock, mock_unlock):
+ with self.lock_inst_no_wait.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
diff --git a/test/unittests/test_objset.py b/test/unittests/test_objset.py
new file mode 100644
index 0000000..cae39ca
--- /dev/null
+++ b/test/unittests/test_objset.py
@@ -0,0 +1,40 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import cibconfig
+
+factory = cibconfig.cib_factory
+
+
+def assert_in(needle, haystack):
+ if needle not in haystack:
+ message = "%s not in %s" % (needle, haystack)
+ raise AssertionError(message)
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
+def test_nodes_nocli():
+ for n in factory.node_id_list():
+ obj = factory.find_object(n)
+ if obj is not None:
+ assert obj.node is not None
+ assert True == obj.cli_use_validate()
+ assert False == obj.nocli
+
+
+def test_show():
+ setobj = cibconfig.mkset_obj()
+ s = setobj.repr_nopretty()
+ sp = s.splitlines()
+ assert_in("node ha-one", sp[0:3])
diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py
new file mode 100644
index 0000000..603c68d
--- /dev/null
+++ b/test/unittests/test_ocfs2.py
@@ -0,0 +1,465 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import ocfs2, utils, ra, constants
+
+logging.basicConfig(level=logging.INFO)
+
+class TestOCFS2Manager(unittest.TestCase):
+ """
+ Unit tests for crmsh.ocfs2.OCFS2Manager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ context1 = mock.Mock(ocfs2_devices=[])
+ self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1)
+
+ context2 = mock.Mock(ocfs2_devices=[],
+ stage="ocfs2",
+ yes_to_all=True)
+ self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2)
+
+ context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"],
+ use_cluster_lvm2=False)
+ self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3)
+
+ context4 = mock.Mock(ocfs2_devices=[],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4)
+
+ context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"])
+ self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5)
+
+ context6 = mock.Mock(ocfs2_devices=["/dev/sda2"],
+ mount_point="/data")
+ self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6)
+
+ context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_verify_packages(self, mock_installed):
+ mock_installed.side_effect = [True, False]
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True)
+ self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception))
+ mock_installed.assert_has_calls([
+ mock.call("ocfs2-tools"),
+ mock.call("lvm2-lockd")
+ ])
+
+ def test_verify_options_stage_miss_option(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._verify_options()
+ self.assertEqual("ocfs2 stage require -o option", str(err.exception))
+
+ def test_verify_options_two_devices(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_options()
+ self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception))
+
+ def test_verify_options_only_C(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst4._verify_options()
+ self.assertEqual("-C option only valid together with -o option", str(err.exception))
+
+ @mock.patch('crmsh.utils.has_mount_point_used')
+ def test_verify_options_mount(self, mock_mount):
+ mock_mount.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst6._verify_options()
+ self.assertEqual("Mount point /data already mounted", str(err.exception))
+ mock_mount.assert_called_once_with("/data")
+
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_not_block(self, mock_is_block):
+ mock_is_block.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_devices()
+ self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_lvm(self, mock_is_block, mock_lvm):
+ mock_lvm.return_value = True
+ mock_is_block.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.has_disk_mounted')
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted):
+ mock_lvm.return_value = False
+ mock_is_block.return_value = True
+ mock_mounted.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 already mounted", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+ mock_mounted.assert_called_once_with("/dev/sdb2")
+
+ def test_check_if_already_configured_return(self):
+ self.ocfs2_inst3._check_if_already_configured()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_if_already_configured(self, mock_run, mock_info):
+ mock_run.return_value = "data xxx fstype=ocfs2 sss"
+ with self.assertRaises(utils.TerminateSubCommand):
+ self.ocfs2_inst2._check_if_already_configured()
+ mock_run.assert_called_once_with("crm configure show")
+ mock_info.assert_called_once_with("Already configured OCFS2 related resources")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_devices')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_if_already_configured')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_options')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ def test_static_verify(self, mock_verify_packages, mock_verify_options, mock_configured, mock_verify_devices):
+ self.ocfs2_inst3._static_verify()
+ mock_verify_packages.assert_called_once_with(False)
+ mock_verify_options.assert_called_once_with()
+ mock_configured.assert_called_once_with()
+ mock_verify_devices.assert_called_once_with()
+
+ def test_dynamic_raise_error(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._dynamic_raise_error("error messages")
+ self.assertEqual("error messages", str(err.exception))
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ def test_check_sbd_and_ocfs2_dev(self, mock_enabled, mock_get_device, mock_error):
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb2"]
+ self.ocfs2_inst3._check_sbd_and_ocfs2_dev()
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_error.assert_called_once_with("/dev/sdb2 cannot be the same with SBD device")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev(self, mock_has_parted, mock_fstype, mock_confirm):
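+ # First device has a partition table and is confirmed; the second holds
+ # ext4 and is declined, so the subcommand terminates without wiping.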
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, False]
+ with self.assertRaises(utils.TerminateSubCommand) as err:
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev_confirmed(self, mock_has_parted, mock_fstype, mock_confirm, mock_run):
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, True]
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+ mock_run.assert_has_calls([
+ mock.call("wipefs -a /dev/sdb2"),
+ mock.call("wipefs -a /dev/sdc2")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify_error(self, mock_has_stonith, mock_error):
+ mock_has_stonith.return_value = False
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_error.assert_called_once_with("OCFS2 requires stonith device configured and running")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._confirm_to_overwrite_ocfs2_dev')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_sbd_and_ocfs2_dev')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify(self, mock_has_stonith, mock_check_dev, mock_confirm):
+ mock_has_stonith.return_value = True
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_check_dev.assert_called_once_with()
+ mock_confirm.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.gen_unused_id')
+ def test_gen_ra_scripts(self, mock_gen_unused):
+ self.ocfs2_inst3.exist_ra_id_list = []
+ mock_gen_unused.return_value = "g1"
+ res = self.ocfs2_inst3._gen_ra_scripts("GROUP", {"id": "g1", "ra_string": "d vip"})
+ assert res == ("g1", "\ngroup g1 d vip")
+ mock_gen_unused.assert_called_once_with([], "g1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_mkfs(self, mock_long, mock_get_value, mock_run):
+ mock_get_value.return_value = "hacluster"
+ self.ocfs2_inst3._mkfs("/dev/sdb2")
+ mock_long.assert_called_once_with(" Creating OCFS2 filesystem for /dev/sdb2")
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+ mock_run.assert_called_once_with("mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sdb2")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_vg_change(self, mock_run):
+ self.ocfs2_inst3.vg_id = "vg1"
+ with self.ocfs2_inst3._vg_change():
+ pass
+ mock_run.assert_has_calls([
+ mock.call("vgchange -ay vg1"),
+ mock.call("vgchange -an vg1")
+ ])
+
+ @mock.patch('crmsh.utils.get_pe_number')
+ @mock.patch('crmsh.utils.gen_unused_id')
+ @mock.patch('crmsh.utils.get_all_vg_name')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_create_lv(self, mock_long, mock_run, mock_all_vg, mock_unused, mock_pe_num):
+ mock_all_vg.return_value = []
+ mock_unused.return_value = "vg1"
+ mock_pe_num.return_value = 1234
+ res = self.ocfs2_inst3._create_lv()
+ self.assertEqual(res, "/dev/vg1/ocfs2-lv")
+ mock_run.assert_has_calls([
+ mock.call("pvcreate /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("vgcreate --shared vg1 /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("lvcreate -l 1234 vg1 -n ocfs2-lv -y")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_group_and_clone_scripts(self, mock_gen):
+ mock_gen.side_effect = [("id1", "group_script\n"), ("id2", "clone_script\n")]
+ res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"])
+ self.assertEqual(res, "group_script\nclone_script\n")
+ mock_gen.assert_has_calls([
+ mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}),
+ mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'})
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_fs_scripts(self, mock_gen):
+ mock_gen.return_value = "scripts"
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ res = self.ocfs2_inst3._gen_fs_scripts()
+ self.assertEqual(res, "scripts")
+ mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'})
+
+ @mock.patch('crmsh.bootstrap.wait_for_resource')
+ @mock.patch('crmsh.utils.append_res_to_group')
+ @mock.patch('crmsh.bootstrap.crm_configure_load')
+ def test_load_append_and_wait(self, mock_load, mock_append, mock_wait):
+ self.ocfs2_inst3.group_id = "g1"
+ self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data")
+ mock_load.assert_called_once_with("update", "scripts")
+ mock_append.assert_called_once_with("g1", "res_id")
+ mock_wait.assert_called_once_with("messages data", "res_id")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait):
+ mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n")
+ mock_gen_group.return_value = "group_scripts\n"
+ self.ocfs2_inst3._config_dlm()
+ mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"})
+ mock_gen_group.assert_called_once_with(["dlm_id"])
+ mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False)
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3._config_lvmlockd()
+ mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.vg_id = "vg1"
+ self.ocfs2_inst3._config_lvmactivate()
+ mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts')
+ @mock.patch('crmsh.utils.mkdirp')
+ def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait):
+ mock_gen_fs.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3._config_fs()
+ mock_mkdir.assert_called_once_with("/data")
+ mock_gen_fs.assert_called_once_with()
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmactivate')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._vg_change')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._create_lv')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmlockd')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_lvm2(self, mock_dlm, mock_lvmlockd, mock_lv, mock_vg, mock_mkfs, mock_lvmactivate, mock_fs):
+ mock_lv.return_value = "/dev/sda1"
+ self.ocfs2_inst3._config_resource_stack_lvm2()
+ mock_dlm.assert_called_once_with()
+ mock_lvmlockd.assert_called_once_with()
+ mock_lv.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sda1")
+ mock_lvmactivate.assert_called_once_with()
+ mock_fs.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_ocfs2_along(self, mock_dlm, mock_mkfs, mock_fs):
+ self.ocfs2_inst3._config_resource_stack_ocfs2_along()
+ mock_dlm.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sdb2")
+ mock_fs.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst7.mount_point = "/data"
+ self.ocfs2_inst7.target_device = "/dev/vg1/lv1"
+ self.ocfs2_inst7.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_lvm2.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ self.ocfs2_inst3.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/sda1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_ocfs2.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_none(self, mock_run):
+ mock_run.return_value = "data"
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ assert res is None
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_exception(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2
+ """
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual("Filesystem require configure device", str(err.exception))
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2 device="/dev/sda2"
+ """
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual(res, "/dev/sda2")
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2_return(self, mock_find):
+ mock_find.return_value = None
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.compare_uuid_with_peer_dev')
+ @mock.patch('crmsh.utils.is_dev_a_plain_raw_disk_or_partition')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2(self, mock_find, mock_long, mock_parser, mock_verify_packages, mock_is_mapper, mock_compare):
+ mock_find.return_value = "/dev/sda2"
+ mock_parser("node1").is_resource_configured.return_value = False
+ mock_is_mapper.return_value = True
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+ mock_verify_packages.assert_called_once_with(False)
+ mock_is_mapper.assert_called_once_with("/dev/sda2", "node1")
+ mock_compare.assert_called_once_with(["/dev/sda2"], "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._static_verify')
+ def test_verify_ocfs2(self, mock_static_verify):
+ context1 = mock.Mock(ocfs2_devices=[])
+ ocfs2.OCFS2Manager.verify_ocfs2(context1)
+ mock_static_verify.assert_called_once_with()
diff --git a/test/unittests/test_parallax.py b/test/unittests/test_parallax.py
new file mode 100644
index 0000000..b934d91
--- /dev/null
+++ b/test/unittests/test_parallax.py
@@ -0,0 +1,104 @@
+from __future__ import unicode_literals
+# Copyright (C) 2019 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parallax.py
+
+
+import unittest
+from unittest import mock
+
+import crmsh.parallax
+import crmsh.prun.prun
+
+
+class TestParallax(unittest.TestCase):
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # Nothing to create here; each test builds its own prun mocks
+
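+ # parallax_call wraps each prun result as (node, (rc, stdout, stderr))
+ # and raises ValueError on any failure; parallax_run returns the raw
+ # rc/stdout/stderr mapping and only raises on SSH-level errors.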
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_call(["node1"], "ls")
+ self.assertEqual(
+ result,
+ [("node1", (0, None, None))],
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_255_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(255, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (0, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (1, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_255_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.SSHError("alice", "node1", "foo")
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_run(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": "/opt/node1/file.c"}
+ results = crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ self.assertListEqual([("node1", "/opt/node1/file.c")], results)
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp_exception(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo")}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": None, "node2": None}
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy_exception(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo"), "node2": None}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
diff --git a/test/unittests/test_parse.py b/test/unittests/test_parse.py
new file mode 100644
index 0000000..27b26b9
--- /dev/null
+++ b/test/unittests/test_parse.py
@@ -0,0 +1,749 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parse.py
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from builtins import zip
+from crmsh import parse
+import unittest
+import shlex
+from crmsh.utils import lines2cli
+from crmsh.xmlutil import xml_tostring
+from lxml import etree
+
+
+def test_score_to_kind():
+ assert parse.score_to_kind("0") == "Optional"
+ assert parse.score_to_kind("INFINITY") == "Mandatory"
+ assert parse.score_to_kind("200") == "Mandatory"
+
+
+class MockValidation(parse.Validation):
+ def resource_roles(self):
+ return ['Master', 'Slave', 'Started']
+
+ def resource_actions(self):
+ return ['start', 'stop', 'promote', 'demote']
+
+ def date_ops(self):
+ return ['lt', 'gt', 'in_range', 'date_spec']
+
+ def expression_types(self):
+ return ['normal', 'string', 'number']
+
+ def rsc_order_kinds(self):
+ return ['Mandatory', 'Optional', 'Serialize']
+
+ def op_attributes(self):
+ return ['id', 'name', 'interval', 'timeout', 'description',
+ 'start-delay', 'interval-origin', 'timeout', 'enabled',
+ 'record-pending', 'role', 'requires', 'on-fail']
+
+ def acl_2_0(self):
+ return True
+
+
+class TestBaseParser(unittest.TestCase):
+ def setUp(self):
+ self.base = parse.BaseParser()
+
+ def _reset(self, cmd):
+ self.base._cmd = shlex.split(cmd)
+ self.base._currtok = 0
+
+ @mock.patch('logging.Logger.error')
+ def test_err(self, mock_err):
+ self._reset('a:b:c:d')
+
+ def runner():
+ self.base.match_split()
+ self.assertRaises(parse.ParseError, runner)
+
+ @mock.patch('logging.Logger.error')
+ def test_idspec(self, mock_error):
+ self._reset('$id=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ self._reset('$id-ref=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id-ref')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ def runner():
+ self._reset('id=foo')
+ self.base.match_idspec()
+ self.assertRaises(parse.ParseError, runner)
+
+ def test_match_split(self):
+ self._reset('resource:role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'resource')
+ self.assertEqual(b, 'role')
+
+ self._reset('role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'role')
+ self.assertEqual(b, None)
+
+ def test_description(self):
+ self._reset('description="this is a description"')
+ self.assertEqual(self.base.try_match_description(), 'this is a description')
+
+ def test_nvpairs(self):
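+ # Trailing 'bug=' and 'bug2=' parse as empty-string values; quoted
+ # values keep their embedded spaces.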
+ self._reset('foo=bar wiz="fizz buzz" bug= bug2=')
+ ret = self.base.match_nvpairs()
+ self.assertEqual(len(ret), 4)
+ retdict = dict([(r.get('name'), r.get('value')) for r in ret])
+ self.assertEqual(retdict['foo'], 'bar')
+ self.assertEqual(retdict['bug'], '')
+ self.assertEqual(retdict['wiz'], 'fizz buzz')
+
+
+class TestCliParser(unittest.TestCase):
+ def setUp(self):
+ parse.validator = MockValidation()
+ self.comments = []
+
+ def _parse(self, s):
+ return parse.parse(s, comments=self.comments)
+
+ @mock.patch('logging.Logger.error')
+ def test_node(self, mock_error):
+ out = self._parse('node node-1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node 1: node-1')
+ self.assertEqual(out.get('id'), '1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node testid: node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1:ping')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+ self.assertEqual(out.get('type'), 'ping')
+
+ out = self._parse('node node-1:unknown')
+ self.assertFalse(out)
+
+ out = self._parse('node node-1 description="foo bar" attributes foo=bar')
+ self.assertEqual(out.get('description'), 'foo bar')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+
+ out = self._parse('node node-1 attributes foo=bar utilization wiz=bang')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('utilization/nvpair[@name="wiz"]/@value'))
+
+ @mock.patch('logging.Logger.error')
+ def test_resources(self, mock_error):
+ out = self._parse('primitive www ocf:heartbeat:apache op monitor timeout=10s')
+ self.assertEqual(out.get('id'), 'www')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['monitor'], out.xpath('//op/@name'))
+
+ out = self._parse('rsc_template public_vm ocf:heartbeat:Xen op start timeout=300s op stop timeout=300s op monitor interval=30s timeout=60s op migrate_from timeout=600s op migrate_to timeout=600s')
+ self.assertEqual(out.get('id'), 'public_vm')
+ self.assertEqual(out.get('class'), 'ocf')
+ #print out
+
+ out = self._parse('primitive st stonith:ssh params hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out.get('id'), 'st')
+
+ out2 = self._parse('primitive st stonith:ssh hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out2.get('id'), 'st')
+
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('primitive st stonith:ssh params hostlist= meta')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('primitive st stonith:null params hostlist=node1 meta requires=nothing description="some description here" op start op monitor interval=60m')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('ms m0 resource params a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ print(xml_tostring(out))
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('instance_attributes/nvpair[@name="a"]/@value'))
+
+ out2 = self._parse('ms m0 resource a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('master ma resource meta a=b')
+ self.assertEqual(out.get('id'), 'ma')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('clone clone-1 resource meta a=b')
+ self.assertEqual(out.get('id'), 'clone-1')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('group group-1 a')
+ self.assertEqual(out.get('id'), 'group-1')
+ self.assertEqual(len(out), 1)
+
+ out = self._parse('group group-1 a b c')
+ self.assertEqual(len(out), 3)
+
+ out = self._parse('group group-1')
+ self.assertFalse(out)
+
+ out = self._parse('group group-1 params a=b')
+ self.assertEqual(len(out), 1)
+ self.assertEqual(['b'], out.xpath('/group/instance_attributes/nvpair[@name="a"]/@value'))
+
+ def test_heartbeat_class(self):
+ out = self._parse('primitive p_node-activate heartbeat:node-activate')
+ self.assertEqual(out.get('id'), 'p_node-activate')
+ self.assertEqual(out.get('class'), 'heartbeat')
+ self.assertEqual(out.get('provider'), None)
+ self.assertEqual(out.get('type'), 'node-activate')
+
+
+ def test_nvpair_ref(self):
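+ # '@foo' becomes an nvpair id-ref; '@fiz:buz' references a single
+ # nvpair from another set by id and name.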
+ out = self._parse('primitive dummy-0 Dummy params @foo')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['foo'], out.xpath('.//nvpair/@id-ref'))
+
+ out = self._parse('primitive dummy-0 Dummy params @fiz:buz')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['fiz'], out.xpath('.//nvpair/@id-ref'))
+ self.assertEqual(['buz'], out.xpath('.//nvpair/@name'))
+
+ @mock.patch('logging.Logger.error')
+ def test_location(self, mock_error):
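+ # Location constraints accept a plain resource name, a /regex/
+ # pattern, or a brace-delimited resource set; malformed variants
+ # must fail to parse.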
+ out = self._parse('location loc-1 resource inf: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('location loc-1 /foo.*/ inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc-pattern'), 'foo.*')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+
+ out = self._parse('location loc-1 // inf: bar')
+ self.assertFalse(out)
+
+ out = self._parse('location loc-1 { one ( two three ) four } inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(['one', 'two', 'three', 'four'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+
+ out = self._parse('location loc-1 thing rule role=slave -inf: #uname eq madrid')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'thing')
+ self.assertEqual(out.get('score'), None)
+
+ out = self._parse('location l { a:foo b:bar }')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_colocation(self, mock_error):
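+ # Colocations with resource sets; unbalanced or mismatched
+ # parentheses/brackets must be rejected.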
+ out = self._parse('colocation col-1 inf: foo:master ( bar wiz sequential=yes )')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(['foo', 'bar', 'wiz'], out.xpath('//resource_ref/@id'))
+ self.assertEqual([], out.xpath('//resource_set[@name="sequential"]/@value'))
+
+ out = self._parse(
+ 'colocation col-1 -20: foo:Master ( bar wiz ) ( zip zoo ) node-attribute="fiz"')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(out.get('score'), '-20')
+ self.assertEqual(['foo', 'bar', 'wiz', 'zip', 'zoo'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(['fiz'], out.xpath('//@node-attribute'))
+
+ out = self._parse('colocation col-1 0: a:master b')
+ self.assertEqual(out.get('id'), 'col-1')
+
+ out = self._parse('colocation col-1 10: ) bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz ]')
+ self.assertFalse(out)
+
+ def test_order(self):
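+ # '[ A B ]' produces a resource_set with require-all=false
+ # (and sequential=false unless overridden).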
+ out = self._parse('order o1 Mandatory: [ A B sequential=true ] C')
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(['false'], out.xpath('/rsc_order/resource_set/@require-all'))
+ self.assertEqual(['A', 'B', 'C'], out.xpath('//resource_ref/@id'))
+
+ out = self._parse('order o1 Mandatory: [ A B sequential=false ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=false')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=true')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order c_apache_1 Mandatory: apache:start ip_1')
+ self.assertEqual(out.get('id'), 'c_apache_1')
+
+ out = self._parse('order c_apache_2 Mandatory: apache:start ip_1 ip_2 ip_3')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'c_apache_2')
+
+ out = self._parse('order o1 Serialize: A ( B C )')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=false')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['false'], out.xpath('//@symmetrical'))
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=true')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['true'], out.xpath('//@symmetrical'))
+
+ inp = 'colocation rsc_colocation-master INFINITY: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]'
+ out = self._parse(inp)
+ self.assertEqual(2, len(out.xpath('/rsc_colocation/resource_set')))
+ self.assertEqual(out.get('id'), 'rsc_colocation-master')
+
+ out = self._parse('order order_2 Mandatory: [ A B ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'order_2')
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['false'], out.xpath('//resource_set/@sequential'))
+
+ out = self._parse('order order-1 Optional: group1:stop group2:start')
+ self.assertEqual(out.get('id'), 'order-1')
+ self.assertEqual(['Optional'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['group1'], out.xpath('/rsc_order/@first'))
+ self.assertEqual(['stop'], out.xpath('/rsc_order/@first-action'))
+ self.assertEqual(['group2'], out.xpath('/rsc_order/@then'))
+ self.assertEqual(['start'], out.xpath('/rsc_order/@then-action'))
+
+ def test_ticket(self):
+ out = self._parse('rsc_ticket ticket-A_public-ip ticket-A: public-ip')
+ self.assertEqual(out.get('id'), 'ticket-A_public-ip')
+
+ out = self._parse('rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence')
+ self.assertEqual(out.get('id'), 'ticket-A_bigdb')
+
+ out = self._parse(
+ 'rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master')
+ self.assertEqual(out.get('id'), 'ticket-B_storage')
+
+ @mock.patch('logging.Logger.error')
+ def test_bundle(self, mock_error):
+ out = self._parse('bundle httpd docker image=pcmk:httpd replicas=3 network ip-range-start=10.10.10.123 host-netmask=24 port-mapping port=80 storage storage-mapping target-dir=/var/www/html source-dir=/srv/www options=rw primitive httpd-apache')
+ self.assertEqual(out.get('id'), 'httpd')
+ self.assertEqual(['pcmk:httpd'], out.xpath('/bundle/docker/@image'))
+ self.assertEqual(['httpd-apache'], out.xpath('/bundle/crmsh-ref/@id'))
+
+ out = self._parse('bundle httpd docker image=pcmk:httpd primitive httpd-apache apache')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_op(self, mock_error):
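+ # 'monitor <rsc>[:<role>] <interval>[:<timeout>]' shorthand.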
+ out = self._parse('monitor apache:Master 10s:20s')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), 'Master')
+ self.assertEqual(out.get('interval'), '10s')
+ self.assertEqual(out.get('timeout'), '20s')
+
+ out = self._parse('monitor apache 60m')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), None)
+ self.assertEqual(out.get('interval'), '60m')
+
+ out = self._parse('primitive rsc_dummy1 Dummy op monitor interval=10 OCF_CHECK_LEVEL=10 timeout=60')
+ # parse must fail: OCF_CHECK_LEVEL appears between the op
+ # attributes interval and timeout (incorrect attribute ordering)
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_acl(self, mock_error):
+ out = self._parse('role user-1 error')
+ self.assertFalse(out)
+ out = self._parse('user user-1 role:user-1')
+ self.assertNotEqual(out, False)
+
+ out = self._parse("role bigdb_admin " +
+ "write meta:bigdb:target-role " +
+ "write meta:bigdb:is-managed " +
+ "write location:bigdb " +
+ "read ref:bigdb")
+ self.assertEqual(4, len(out))
+
+ # new-style ACLs (acl_target, acl_group, acl_role)
+
+ out = self._parse("acl_target foo a")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b c")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse("acl_group fee a b c")
+ self.assertEqual('acl_group', out.tag)
+ self.assertEqual('fee', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse('role fum description="test" read a: description="test2" xpath:*[@name=\\"karl\\"]')
+ self.assertEqual(['*[@name="karl"]'], out.xpath('/acl_role/acl_permission/@xpath'))
+
+ def test_xml(self):
+ out = self._parse('xml <node uname="foo-1"/>')
+ self.assertEqual('node', out.tag)
+ self.assertEqual('foo-1', out.get('uname'))
+
+ @mock.patch('logging.Logger.error')
+ def test_property(self, mock_error):
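+ # Cluster properties and rule expressions; also covers rsc_defaults.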
+ out = self._parse('property stonith-enabled=true')
+ self.assertEqual(['true'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+
+ # a rule with no explicit score defaults to INFINITY
+ out = self._parse('property rule #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['INFINITY'], out.xpath('//@score'))
+
+ out = self._parse('property rule 10: #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['node1'], out.xpath('//expression[@attribute="#uname"]/@value'))
+
+ out = self._parse('property rule +inf: date spec years=2014 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['2014'], out.xpath('//date_spec/@years'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m')
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults foo: failure-timeout=3m')
+ self.assertEqual('foo', out[0].get('id'))
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m foo:')
+ self.assertEqual(False, out)
+
+ def test_empty_property_sets(self):
+ out = self._parse('rsc_defaults defaults:')
+ self.assertEqual('<rsc_defaults><meta_attributes id="defaults"/></rsc_defaults>',
+ xml_tostring(out))
+
+ out = self._parse('op_defaults defaults:')
+ self.assertEqual('<op_defaults><meta_attributes id="defaults"/></op_defaults>',
+ xml_tostring(out))
+
+ def test_fencing(self):
+ # the test environment provides three nodes: ha-one, ha-two, ha-three
+
+ out = self._parse('fencing_topology')
+ expect = '<fencing-topology/>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology poison-pill power')
+ expect = '<fencing-topology><fencing-level target="ha-one" index="1" devices="poison-pill"/><fencing-level target="ha-one" index="2" devices="power"/><fencing-level target="ha-three" index="1" devices="poison-pill"/><fencing-level target="ha-three" index="2" devices="power"/><fencing-level target="ha-two" index="1" devices="poison-pill"/><fencing-level target="ha-two" index="2" devices="power"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology node-a: poison-pill power node-b: ipmi serial')
+ self.assertEqual(4, len(out))
+
+ devs = ['stonith-vbox3-1-off', 'stonith-vbox3-2-off',
+ 'stonith-vbox3-1-on', 'stonith-vbox3-2-on']
+ out = self._parse('fencing_topology vbox4: %s' % ','.join(devs))
+ self.assertEqual(1, len(out))
+
+ def test_fencing_1114(self):
+ """
+ Test node attribute fence target assignment
+ """
+ out = self._parse('fencing_topology attr:rack=1 poison-pill power')
+ expect = """<fencing-topology><fencing-level index="1" devices="poison-pill" target-attribute="rack" target-value="1"/><fencing-level index="2" devices="power" target-attribute="rack" target-value="1"/></fencing-topology>"""
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology attr:rack=1 poison-pill,power')
+ expect = '<fencing-topology><fencing-level index="1" devices="poison-pill,power" target-attribute="rack" target-value="1"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ @mock.patch('logging.Logger.error')
+ def test_tag(self, mock_error):
+ out = self._parse('tag tag1: one two three')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['one', 'two', 'three'], out.xpath('/tag/obj_ref/@id'))
+
+ out = self._parse('tag tag1:')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1:: foo')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1 foo bar')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['foo', 'bar'], out.xpath('/tag/obj_ref/@id'))
+
+ def test_alerts(self):
+ "Test alerts (1.1.15+)"
+ out = self._parse('alert alert1 /tmp/foo.sh to /tmp/bar.log')
+ self.assertEqual(out.get('id'), 'alert1')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+
+ def test_alerts_brackets(self):
+ "Test alerts w/ brackets (1.1.15+)"
+ out = self._parse('alert alert2 /tmp/foo.sh to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert2')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+ self.assertEqual(['10s'],
+ out.xpath('/alert/recipient/meta_attributes/nvpair[@name="timeout"]/@value'))
+
+ def test_alerts_selectors(self):
+ "Test alerts w/ selectors (1.1.17+)"
+ out = self._parse('alert alert3 /tmp/foo.sh select nodes fencing attributes { standby shutdown } to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert3')
+ self.assertEqual(1, len(out.xpath('/alert/select/select_nodes')))
+ self.assertEqual(1, len(out.xpath('/alert/select/select_fencing')))
+ self.assertEqual(['standby', 'shutdown'],
+ out.xpath('/alert/select/select_attributes/attribute/@name'))
+
+
+ def _parse_lines(self, lines):
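+ # Helper: split a multi-line snippet with lines2cli() and parse each
+ # resulting line, asserting that none of them fails to parse.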
+ out = []
+ for line in lines2cli(lines):
+ if line is not None:
+ tmp = self._parse(line.strip())
+ self.assertNotEqual(tmp, False)
+ if tmp is not None:
+ out.append(tmp)
+ return out
+
+ def test_comments(self):
+ outp = self._parse_lines('''
+ # comment
+ node n1
+ ''')
+ self.assertNotEqual(-1, xml_tostring(outp[0]).find('# comment'))
+
+ def test_uppercase(self):
+ outp = self._parse_lines('''
+ PRIMITIVE rsc_dummy ocf:heartbeat:Dummy
+ MONITOR rsc_dummy 30
+ ''')
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('op', outp[1].tag)
+
+ outp = self._parse_lines('''
+ PRIMITIVE testfs ocf:heartbeat:Filesystem \
+ PARAMS directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ CLONE testfs-clone testfs \
+ META ordered="true" interleave="true"
+ ''')
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('clone', outp[1].tag)
+
+ out = self._parse('LOCATION loc-1 resource INF: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('NODE node-1 ATTRIBUTES foo=bar UTILIZATION wiz=bang')
+ self.assertEqual('node-1', out.get('uname'))
+ self.assertEqual(['bar'], out.xpath('/node/instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('/node/utilization/nvpair[@name="wiz"]/@value'))
+
+ out = self._parse('PRIMITIVE virtual-ip ocf:heartbeat:IPaddr2 PARAMS ip=192.168.122.13 lvs_support=false OP start timeout=20 interval=0 OP stop timeout=20 interval=0 OP monitor interval=10 timeout=20')
+ self.assertEqual(['192.168.122.13'], out.xpath('//instance_attributes/nvpair[@name="ip"]/@value'))
+
+ out = self._parse('GROUP web-server virtual-ip apache META target-role=Started')
+ self.assertEqual(out.get('id'), 'web-server')
+
+ def test_nvpair_novalue(self):
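+ # A bare parameter name ('verbose' below) must yield an nvpair
+ # without a value attribute.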
+ inp = """primitive stonith_ipmi-karl stonith:fence_ipmilan \
+ params pcmk_host_list=karl verbose action=reboot \
+ ipaddr=10.43.242.221 login=root passwd=dummy method=onoff \
+ op start interval=0 timeout=60 \
+ op stop interval=0 timeout=60 \
+ op monitor interval=600 timeout=60 \
+ meta target-role=Started"""
+
+ outp = self._parse_lines(inp)
+ self.assertEqual(len(outp), 1)
+ self.assertEqual('primitive', outp[0].tag)
+ verbose = outp[0].xpath('//nvpair[@name="verbose"]')
+ self.assertEqual(len(verbose), 1)
+ self.assertTrue('value' not in verbose[0].attrib)
+
+ @mock.patch('logging.Logger.error')
+ def test_configs(self, mock_error):
+ outp = self._parse_lines('''
+ primitive rsc_dummy ocf:heartbeat:Dummy
+ monitor rsc_dummy 30
+ ''')
+ self.assertEqual(2, len(outp))
+
+ outp = self._parse_lines('''
+ primitive testfs ocf:heartbeat:Filesystem \
+ params directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+ ''')
+ self.assertEqual(2, len(outp))
+
+ inp = [
+ """node node1 attributes mem=16G""",
+ """node node2 utilization cpu=4""",
+ """primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires="nothing" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s""",
+ """primitive st2 stonith:ssh \
+ params hostlist='node1 node2'""",
+ """primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10""",
+ """monitor d1 60s:30s""",
+ """primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s""",
+ """monitor d2:Started 60s:30s""",
+ """group g1 d1 d2""",
+ """primitive d3 ocf:pacemaker:Dummy""",
+ """clone c d3 \
+ meta clone-max=1""",
+ """primitive d4 ocf:pacemaker:Dummy""",
+ """ms m d4""",
+ """primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops""",
+ """primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1""",
+ """ms m5 s5""",
+ """ms m6 s6""",
+ """location l1 g1 100: node1""",
+ """location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1""",
+ """location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0""",
+ """location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0""",
+ """location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date date_spec years="2009" hours=09-17""",
+ """location l6 m5 \
+ rule $id-ref=l2-rule1""",
+ """location l7 m5 \
+ rule $id-ref=l2""",
+ """collocation c1 inf: m6 m5""",
+ """collocation c2 inf: m5:Master d1:Started""",
+ """order o1 Mandatory: m5 m6""",
+ """order o2 Optional: d1:start m5:promote""",
+ """order o3 Serialize: m5 m6""",
+ """order o4 inf: m5 m6""",
+ """rsc_ticket ticket-A_m6 ticket-A: m6""",
+ """rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence""",
+ """rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence""",
+ """fencing_topology st st2""",
+ """property stonith-enabled=true""",
+ """property $id=cpset2 maintenance-mode=true""",
+ """rsc_defaults failure-timeout=10m""",
+ """op_defaults $id=opsdef2 record-pending=true"""]
+
+ outp = self._parse_lines('\n'.join(inp))
+ a = [xml_tostring(x) for x in outp]
+ b = [
+ '<node uname="node1"><instance_attributes><nvpair name="mem" value="16G"/></instance_attributes></node>',
+ '<node uname="node2"><utilization><nvpair name="cpu" value="4"/></utilization></node>',
+ '<primitive id="st" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes><meta_attributes><nvpair name="target-role" value="Started"/><nvpair name="requires" value="nothing"/></meta_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="monitor" interval="60m" timeout="60s"/></operations></primitive>',
+ '<primitive id="st2" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes></primitive>',
+ '<primitive id="d1" class="ocf" provider="pacemaker" type="Dummy"><operations id="d1-ops"><op name="monitor" interval="60m"/><op name="monitor" interval="120m"><instance_attributes><nvpair name="OCF_CHECK_LEVEL" value="10"/></instance_attributes></op></operations></primitive>',
+ '<op name="monitor" rsc="d1" interval="60s" timeout="30s"/>',
+ '<primitive id="d2" class="ocf" provider="heartbeat" type="Delay"><instance_attributes><nvpair name="mondelay" value="60"/></instance_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="stop" timeout="60s" interval="0s"/></operations></primitive>',
+ '<op name="monitor" role="Started" rsc="d2" interval="60s" timeout="30s"/>',
+ '<group id="g1"><crmsh-ref id="d1"/><crmsh-ref id="d2"/></group>',
+ '<primitive id="d3" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<clone id="c"><meta_attributes><nvpair name="clone-max" value="1"/></meta_attributes><crmsh-ref id="d3"/></clone>',
+ '<primitive id="d4" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<master id="m"><crmsh-ref id="d4"/></master>',
+ '<primitive id="s5" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1-ops"/></primitive>',
+ '<primitive id="s6" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1"/></primitive>',
+ '<master id="m5"><crmsh-ref id="s5"/></master>',
+ '<master id="m6"><crmsh-ref id="s6"/></master>',
+ '<rsc_location id="l1" rsc="g1" score="100" node="node1"/>',
+ '<rsc_location id="l2" rsc="c"><rule id="l2-rule1" score="100"><expression operation="eq" attribute="#uname" value="node1"/></rule></rsc_location>',
+ '<rsc_location id="l3" rsc="m5"><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l4" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l5" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule><rule score="INFINITY"><date_expression operation="lt" end="2009-05-26"/><date_expression operation="in_range" start="2009-05-26" end="2009-07-26"/><date_expression operation="in_range" start="2009-05-26"><duration years="2009"/></date_expression><date_expression operation="date_spec"><date_spec years="2009" hours="09-17"/></date_expression></rule></rsc_location>',
+ '<rsc_location id="l6" rsc="m5"><rule id-ref="l2-rule1"/></rsc_location>',
+ '<rsc_location id="l7" rsc="m5"><rule id-ref="l2"/></rsc_location>',
+ '<rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>',
+ '<rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>',
+ '<rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>',
+ '<rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>',
+ '<rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>',
+ '<rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence"><resource_set><resource_ref id="m6"/><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence"><resource_set><resource_ref id="m6"/></resource_set><resource_set role="Master"><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<fencing-topology><fencing-level target="ha-one" index="1" devices="st"/><fencing-level target="ha-one" index="2" devices="st2"/><fencing-level target="ha-three" index="1" devices="st"/><fencing-level target="ha-three" index="2" devices="st2"/><fencing-level target="ha-two" index="1" devices="st"/><fencing-level target="ha-two" index="2" devices="st2"/></fencing-topology>',
+ '<cluster_property_set><nvpair name="stonith-enabled" value="true"/></cluster_property_set>',
+ '<cluster_property_set id="cpset2"><nvpair name="maintenance-mode" value="true"/></cluster_property_set>',
+ '<rsc_defaults><meta_attributes><nvpair name="failure-timeout" value="10m"/></meta_attributes></rsc_defaults>',
+ '<op_defaults><meta_attributes id="opsdef2"><nvpair name="record-pending" value="true"/></meta_attributes></op_defaults>',
+ ]
+
+ self.maxDiff = None
+ self.assertEqual(len(b), len(a))
+ for result, expected in zip(a, b):
+ self.assertEqual(expected, result)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_prun.py b/test/unittests/test_prun.py
new file mode 100644
index 0000000..7e987bf
--- /dev/null
+++ b/test/unittests/test_prun.py
@@ -0,0 +1,157 @@
+import crmsh.constants
+import crmsh.prun.prun
+import crmsh.prun.runner
+
+import unittest
+from unittest import mock
+
+
+class TestPrun(unittest.TestCase):
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
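+ # Non-root ssh user pair: the command is expected to be wrapped as
+ # 'su <local_user> --login -c ssh ... <remote_user>@<host> sudo -H /bin/sh'.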
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "alice", "bob"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'bob'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'bob'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+ self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_root(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
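+ # root-to-root: no 'su' wrapper; a plain '/bin/sh -c ssh ...' is used.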
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "root", "root"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_geteuid.assert_not_called()
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'root'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+ self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_localhost(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
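+ # Local host target: no ssh involved, the command runs via '/bin/sh'.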
+ host_cmdline = {"host1": "foo"}
+ #mock_user_pair_for_ssh.return_value = "alice", "bob"
+ mock_is_local_host.return_value = True
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_not_called()
+ mock_is_local_host.assert_called_once_with('host1')
+ mock_runner_add_task.assert_called_once_with(
+ TaskArgumentsEq(
+ ['/bin/sh'],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )
+ )
+ mock_runner_run.assert_called_once()
+ self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1"}, set(results.keys()))
+
+
+class TaskArgumentsEq(crmsh.prun.runner.Task):
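+ # Task subclass with structural equality over the relevant fields so
+ # that mock assert_* calls can match Task arguments.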
+ def __eq__(self, other):
+ if not isinstance(other, crmsh.prun.runner.Task):
+ return False
+ return self.args == other.args \
+ and self.input == other.input \
+ and self.stdout_config == other.stdout_config \
+ and self.stderr_config == other.stderr_config \
+ and self.context == other.context
diff --git a/test/unittests/test_qdevice.py b/test/unittests/test_qdevice.py
new file mode 100644
index 0000000..f6b2f13
--- /dev/null
+++ b/test/unittests/test_qdevice.py
@@ -0,0 +1,1031 @@
+import os
+import unittest
+import socket
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+from crmsh import qdevice, lock
+
+
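+# Sample corosync.conf fixtures shipped alongside the unit tests.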
+F2 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.2')).read()
+F4 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.3')).read()
+
+
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_restart(mock_get_dict, mock_quorate):
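+ # With a single expected vote and calculate_quorate_status() returning
+ # False, adding a qdevice must trigger a full restart.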
+ mock_get_dict.return_value = {'Expected': '1', 'Total': '1'}
+ mock_quorate.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD, False, False)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_reload(mock_get_dict, mock_quorate):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(3, 2)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_later(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
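+ # Decorate a no-op function and invoke it, verifying the parameters
+ # the decorator passes to RemoteLock.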
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_claim_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.ClaimLockError
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_fatal.assert_called_once_with("Duplicated cluster name \"cluster1\"!")
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_ssh_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+class TestQDevice(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # Use the setup to create a fresh instance for each test
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+ self.qdevice_with_hostname = qdevice.QDevice("node.qnetd")
+ self.qdevice_with_invalid_port = qdevice.QDevice("10.10.10.123", port=100)
+ self.qdevice_with_invalid_tie_breaker = qdevice.QDevice("10.10.10.123", tie_breaker="wrong")
+ self.qdevice_with_ip_cluster_node = qdevice.QDevice("10.10.10.123", cluster_node="node1.com")
+ self.qdevice_with_invalid_cmds_relative_path = qdevice.QDevice("10.10.10.123", cmds="ls")
+ self.qdevice_with_invalid_cmds_not_exist = qdevice.QDevice("10.10.10.123", cmds="/not_exist")
+ self.qdevice_with_cluster_name = qdevice.QDevice("10.10.10.123", cluster_name="hacluster1")
+ self.qdevice_with_stage_cluster_name = qdevice.QDevice("10.10.10.123", is_stage=True, cluster_name="cluster1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_qnetd_cacert_on_local(self):
+ res = self.qdevice_with_ip.qnetd_cacert_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt")
+
+ def test_qnetd_cacert_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qnetd_cacert_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt")
+
+ def test_qdevice_crq_on_qnetd(self):
+ res = self.qdevice_with_cluster_name.qdevice_crq_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.hacluster1")
+
+ def test_qdevice_crq_on_local(self):
+ res = self.qdevice_with_ip.qdevice_crq_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq")
+
+ def test_qnetd_cluster_crt_on_qnetd(self):
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/cluster-None.crt")
+
+ @mock.patch('os.path.basename')
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ def test_qnetd_cluster_crt_on_local(self, mock_qnetd_crt, mock_basename):
+ mock_qnetd_crt.return_value = "crt_file"
+ mock_basename.return_value = "crt_file"
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/crt_file")
+
+ def test_qdevice_p12_on_local(self):
+ res = self.qdevice_with_ip.qdevice_p12_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12")
+
+ def test_qdevice_p12_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qdevice_p12_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12")
+
+ @mock.patch('crmsh.utils.check_port_open')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_port_error(self, mock_getaddrinfo, mock_ping, mock_in_local, mock_check):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = False
+ mock_check.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "ssh service on \"qnetd-node\" not available"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_local(self, mock_getaddrinfo, mock_ping, mock_in_local):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = True
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "host for qnetd must be a remote one"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr(self, mock_getaddrinfo):
+ mock_getaddrinfo.side_effect = socket.error
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "host \"qnetd-node\" is unreachable"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.valid_port')
+ def test_check_qdevice_port(self, mock_port):
+ mock_port.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_port("1")
+ excepted_err_string = "invalid qdevice port range(1024 - 65535)"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_algo(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_algo("1")
+ excepted_err_string = "invalid ALGORITHM choice: '1' (choose from 'ffsplit', 'lms')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_check_qdevice_tie_breaker(self, mock_is_active):
+ mock_is_active.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tie_breaker("1")
+ excepted_err_string = "invalid qdevice tie_breaker(lowest/highest/valid_node_id)"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_tls(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tls("1")
+ excepted_err_string = "invalid TLS choice: '1' (choose from 'on', 'off', 'required')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_hm(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics_mode("1")
+ excepted_err_string = "invalid MODE choice: '1' (choose from 'on', 'sync', 'off')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_he_path_error(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("command1")
+ excepted_err_string = "commands for heuristics should be absolute path"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('os.path.exists')
+ def test_check_qdevice_he_not_exist_error(self, mock_exists):
+ mock_exists.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("/usr/bin/testst")
+ excepted_err_string = "command /usr/bin/testst not exist"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_check_package_installed(self, mock_installed):
+ mock_installed.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_package_installed("corosync-qdevice")
+ excepted_err_string = "Package \"corosync-qdevice\" not installed on this node"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics_mode')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tls')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tie_breaker')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_algo')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_port')
+ @mock.patch('crmsh.qdevice.QDevice.check_qnetd_addr')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ def test_valid_qdevice_options(self, mock_installed, mock_check_qnetd, mock_check_port,
+ mock_check_algo, mock_check_tie, mock_check_tls, mock_check_h, mock_check_hm):
+ self.qdevice_with_ip.valid_qdevice_options()
+ mock_installed.assert_called_once_with("corosync-qdevice")
+ mock_check_qnetd.assert_called_once_with("10.10.10.123")
+
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_not_installed(self, mock_installed):
+ self.qdevice_with_ip.qnetd_ip = "10.10.10.123"
+ mock_installed.return_value = False
+ expected_err_string = 'Package "corosync-qnetd" not installed on 10.10.10.123!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, install "corosync-qnetd" on 10.10.10.123.\nThen run command "crm cluster init" with "qdevice" stage, like:\n crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately.'
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_ip.valid_qnetd()
+ self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_with_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = True
+ mock_run.return_value = "data"
+ excepted_err_string = "This cluster's name \"cluster1\" already exists on qnetd server!\nPlease consider to use the different cluster-name property."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_stage_cluster_name.valid_qnetd()
+ self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("corosync-qnetd-tool -l -c cluster1", "10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_without_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = False
+ excepted_err_string = "This cluster's name \"hacluster1\" already exists on qnetd server!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, consider to use the different cluster-name property.\nThen run command \"crm cluster init\" with \"qdevice\" stage, like:\n crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_cluster_name.valid_qnetd()
+ self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("test -f /etc/corosync/qnetd/nssdb/cluster-hacluster1.crt", "10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.enable_service")
+ def test_enable_qnetd(self, mock_enable):
+ self.qdevice_with_ip.enable_qnetd()
+ mock_enable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.disable_service")
+ def test_disable_qnetd(self, mock_disable):
+ self.qdevice_with_ip.disable_qnetd()
+ mock_disable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.start_service")
+ def test_start_qnetd(self, mock_start):
+ self.qdevice_with_ip.start_qnetd()
+ mock_start.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.stop_service")
+ def test_stop_qnetd(self, mock_stop):
+ self.qdevice_with_ip.stop_qnetd()
+ mock_stop.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd_already_exists(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.return_value = [("10.10.10.123", (0, None, None))]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
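+ # __wrapped__ bypasses the decorator on init_db_on_qnetd so the
+ # method body is exercised in isolation.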
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "test -f {}".format(mock_qnetd_cacert.return_value))
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.side_effect = [ValueError(mock.Mock(), "Failed on 10.10.10.123: error happen"),
+ [("10.10.10.123", (0, None, None))]]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
+
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+
+ mock_call.assert_has_calls([
+ mock.call(["10.10.10.123"], "test -f {}".format(mock_qnetd_cacert.return_value)),
+ mock.call(["10.10.10.123"], "corosync-qnetd-certutil -i")
+ ])
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 1: Initialize database on 10.10.10.123",
+ 'corosync-qnetd-certutil -i')
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd_exist(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = True
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 2: Fetch qnetd-cacert.crt from 10.10.10.123")
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_qnetd_crt_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("os.path.dirname")
+ def test_copy_qnetd_crt_to_cluster(self, mock_dirname, mock_qnetd_cacert_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_dirname.return_value = "/etc/corosync/qdevice/net/10.10.10.123"
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 3: Copy exported qnetd-cacert.crt to ['node2.com']")
+ mock_copy.assert_called_once_with(["node2.com"], mock_dirname.return_value,
+ "/etc/corosync/qdevice/net", True)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ def test_init_db_on_cluster(self, mock_list_nodes, mock_qnetd_cacert_local, mock_call, mock_log):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_call.return_value = [("node1", (0, None, None)), ("node2", (0, None, None))]
+
+ self.qdevice_with_ip.init_db_on_cluster()
+
+ mock_list_nodes.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 4: Initialize database on ['node1', 'node2']",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt')
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+ "corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ def test_create_ca_request(self, mock_stdout_stderr, mock_log):
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_cluster_name.create_ca_request()
+
+ mock_log.assert_called_once_with("Step 5: Generate certificate request qdevice-net-node.crq",
+ 'corosync-qdevice-net-certutil -r -n hacluster1')
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -r -n hacluster1")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_crq_to_qnetd(self, mock_copy, mock_qdevice_crq_local,
+ mock_qdevice_crq_qnetd, mock_log):
+ mock_qdevice_crq_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq"
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+
+ self.qdevice_with_ip.copy_crq_to_qnetd()
+
+ mock_log.assert_called_once_with("Step 6: Copy qdevice-net-node.crq to 10.10.10.123")
+ mock_copy.assert_called_once_with(["10.10.10.123"], mock_qdevice_crq_local.return_value,
+ mock_qdevice_crq_qnetd.return_value, False)
+ mock_qdevice_crq_local.assert_called_once_with()
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ def test_sign_crq_on_qnetd(self, mock_qdevice_crq_qnetd, mock_call, mock_log):
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+ mock_call.return_value = ["10.10.10.123", (0, None, None)]
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.sign_crq_on_qnetd()
+
+ mock_log.assert_called_once_with("Step 7: Sign and export cluster certificate on 10.10.10.123",
+ 'corosync-qnetd-certutil -s -c /etc/corosync/qnetd/nssdb/qdevice-net-node.crq -n hacluster')
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "corosync-qnetd-certutil -s -c {} -n hacluster".format(mock_qdevice_crq_qnetd.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_cluster_crt_from_qnetd(self, mock_parallax_slurp, mock_crt_on_qnetd, mock_log):
+ mock_crt_on_qnetd.return_value = "/etc/corosync/qnetd/nssdb/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.fetch_cluster_crt_from_qnetd()
+
+ mock_log.assert_called_once_with("Step 8: Fetch cluster-hacluster.crt from 10.10.10.123")
+ mock_crt_on_qnetd.assert_has_calls([mock.call(), mock.call()])
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", mock_crt_on_qnetd.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_local", new_callable=mock.PropertyMock)
+ def test_import_cluster_crt(self, mock_crt_on_local, mock_stdout_stderr, mock_log):
+ mock_crt_on_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.import_cluster_crt()
+
+ mock_log.assert_called_once_with("Step 9: Import certificate file cluster-hacluster.crt on local",
+ 'corosync-qdevice-net-certutil -M -c /etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt')
+ mock_crt_on_local.assert_has_calls([mock.call(), mock.call()])
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -M -c {}".format(mock_crt_on_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_p12_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ def test_copy_p12_to_cluster(self, mock_p12_on_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_called_once_with("Step 10: Copy qdevice-net-node.p12 to ['node2.com']")
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_called_once_with(["node2.com"], mock_p12_on_local.return_value,
+ mock_p12_on_local.return_value, False)
+ mock_p12_on_local.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster_one_node(self, mock_list_nodes, mock_call, mock_log):
+ mock_list_nodes.return_value = []
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_not_called()
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_not_called()
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster(self, mock_list_nodes, mock_p12_on_local, mock_log, mock_call):
+ mock_list_nodes.return_value = ["node2", "node3"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+ mock_call.return_value = [("node2", (0, None, None)), ("node3", (0, None, None))]
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_called_once_with("Step 11: Import qdevice-net-node.p12 on ['node2', 'node3']",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(
+ ["node2", "node3"],
+ "corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_local.return_value))
+ mock_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_p12_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.import_cluster_crt")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_cluster_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.sign_crq_on_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.copy_crq_to_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.create_ca_request")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_qnetd_crt_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_qnetd")
+ def test_certificate_process_on_init(self, mock_init_db_on_qnetd, mock_fetch_qnetd_crt_from_qnetd,
+ mock_copy_qnetd_crt_to_cluster, mock_init_db_on_cluster, mock_create_ca_request,
+ mock_copy_crq_to_qnetd, mock_sign_crq_on_qnetd, mock_fetch_cluster_crt_from_qnetd,
+ mock_import_cluster_crt, mock_copy_p12_to_cluster, mock_import_p12_on_cluster):
+
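+ # All eleven certificate steps of the init-side workflow should run exactly once.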
+ self.qdevice_with_ip.certificate_process_on_init()
+ mock_init_db_on_qnetd.assert_called_once_with()
+ mock_fetch_qnetd_crt_from_qnetd.assert_called_once_with()
+ mock_copy_qnetd_crt_to_cluster.assert_called_once_with()
+ mock_init_db_on_cluster.assert_called_once_with()
+ mock_create_ca_request.assert_called_once_with()
+ mock_copy_crq_to_qnetd.assert_called_once_with()
+ mock_sign_crq_on_qnetd.assert_called_once_with()
+ mock_fetch_cluster_crt_from_qnetd.assert_called_once_with()
+ mock_import_cluster_crt.assert_called_once_with()
+ mock_copy_p12_to_cluster.assert_called_once_with()
+ mock_import_p12_on_cluster.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster_exist(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_called_once_with("Step 1: Fetch qnetd-cacert.crt from node1.com")
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], "/etc/corosync/qdevice/net", mock_qnetd_cacert_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ def test_init_db_on_local(self, mock_qnetd_cacert_cluster, mock_stdout_stderr, mock_log):
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_ip_cluster_node.init_db_on_local()
+
+ mock_log.assert_called_once_with("Step 2: Initialize database on local",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt')
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster_exist(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_called_once_with("Step 3: Fetch qdevice-net-node.p12 from node1.com")
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], '/etc/corosync/qdevice/net', mock_p12_on_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ def test_import_p12_on_local(self, mock_p12_on_cluster, mock_stdout_stderr, mock_log):
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.import_p12_on_local()
+
+ mock_log.assert_called_once_with("Step 4: Import cluster certificate and key",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12')
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_p12_from_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_cluster")
+ def test_certificate_process_on_join(self, mock_fetch_qnetd_crt_from_cluster, mock_init_db_on_local,
+ mock_fetch_p12_from_cluster, mock_import_p12_on_local):
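+ # The join-side workflow should run its four steps exactly once.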
+ self.qdevice_with_ip.certificate_process_on_join()
+ mock_fetch_qnetd_crt_from_cluster.assert_called_once_with()
+ mock_init_db_on_local.assert_called_once_with()
+ mock_fetch_p12_from_cluster.assert_called_once_with()
+ mock_import_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_write_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_mksection, mock_str2file):
+ mock_mksection.side_effect = [
+ ["device {", "}"],
+ ["net {", "}"]
+ ]
+ mock_read_file.return_value = "data"
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.write_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.add.assert_has_calls([
+ mock.call('quorum', ["device {", "}"]),
+ mock.call('quorum.device', ["net {", "}"])
+ ])
+ mock_instance.set.assert_has_calls([
+ mock.call('quorum.device.votes', '1'),
+ mock.call('quorum.device.model', 'net'),
+ mock.call('quorum.device.net.tls', 'on'),
+ mock.call('quorum.device.net.host', '10.10.10.123'),
+ mock.call('quorum.device.net.port', 5403),
+ mock.call('quorum.device.net.algorithm', 'ffsplit'),
+ mock.call('quorum.device.net.tie_breaker', 'lowest')
+ ])
+ mock_instance.to_string.assert_called_once_with()
+ mock_mksection.assert_has_calls([
+ mock.call('quorum.device', []),
+ mock.call('quorum.device.net', [])
+ ])
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_remove_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_str2file):
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.remove_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db_not_exist(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_not_called()
+ mock_call.assert_not_called()
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = True
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_call.return_value = [("node1.com", (0, None, None)), ("node2.com", (0, None, None))]
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+ 'rm -rf /etc/corosync/qdevice/net/*')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_qdevice_vote(self, mock_run, mock_get_value, mock_warning):
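+ # Sample corosync-quorumtool output in which the Qdevice row reports 0 votes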
+ data = """
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 A,V,NMW 192.168.122.221 (local)
+ 0 0 Qdevice
+ """
+ mock_run.return_value = data
+ mock_get_value.return_value = "qnetd-node"
+ qdevice.QDevice.check_qdevice_vote()
+ mock_run.assert_called_once_with("corosync-quorumtool -s", success_exit_status={0, 2})
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_warning.assert_called_once_with("Qdevice's vote is 0, which simply means Qdevice can't talk to Qnetd(qnetd-node) for various reasons.")
+
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ def test_config_and_start_qdevice(self, mock_rm_db, mock_status_long, mock_evaluate):
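+ # Allow the mocked status_long to be used as a context manager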
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.certificate_process_on_init = mock.Mock()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice = mock.Mock()
+ self.qdevice_with_ip.config_qdevice = mock.Mock()
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
+
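+ # Call the undecorated method via __wrapped__ to bypass its decorator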
+ self.qdevice_with_ip.config_and_start_qdevice.__wrapped__(self.qdevice_with_ip)
+
+ mock_rm_db.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Qdevice certification process")
+ self.qdevice_with_ip.certificate_process_on_init.assert_called_once_with()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.config_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ def test_adjust_sbd_watchdog_timeout_with_qdevice(self, mock_check_reachable, mock_using_diskless_sbd, mock_get_sbd_value, mock_update_config, mock_get_timeout, mock_set_property):
+ mock_using_diskless_sbd.return_value = True
+ mock_get_sbd_value.return_value = ""
+ mock_get_timeout.return_value = 100
+
+ self.qdevice_with_stage_cluster_name.adjust_sbd_watchdog_timeout_with_qdevice()
+
+ mock_check_reachable.assert_called_once_with()
+ mock_using_diskless_sbd.assert_called_once_with()
+ mock_get_sbd_value.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+ mock_update_config.assert_called_once_with({"SBD_WATCHDOG_TIMEOUT": str(sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE)})
+ mock_set_property.assert_called_once_with("stonith-timeout", 100)
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_reload(self, mock_status, mock_cluster_run, mock_enable_qnetd, mock_start_qnetd):
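+ # With the RELOAD policy only corosync-qdevice is restarted, not the whole cluster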
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Starting corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("systemctl restart corosync-qdevice")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_restart(self, mock_status, mock_cluster_run, mock_wait, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Restarting cluster service"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_wait.assert_called_once_with()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("crm cluster restart")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_warn(self, mock_status, mock_cluster_run, mock_warn, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ ])
+ mock_warn.assert_called_once_with("To use qdevice service, need to restart cluster service manually on each node")
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.qdevice.QDevice.write_qdevice_config')
+ def test_config_qdevice(self, mock_write, mock_is_unicast, mock_add_nodelist, mock_status_long,
+ mock_update_votes, mock_run):
+ mock_is_unicast.return_value = False
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.config_qdevice()
+
+ mock_write.assert_called_once_with()
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Update configuration")
+ mock_update_votes.assert_called_once_with()
+ mock_run.assert_called_once_with("crm corosync reload")
+
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd_return(self, mock_configured):
+ mock_configured.return_value = False
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd(self, mock_configured, mock_get_value, mock_run):
+ mock_configured.return_value = True
+ mock_get_value.side_effect = ["qnetd-node", "cluster1"]
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("quorum.device.net.host"),
+ mock.call("totem.cluster_name")])
+ crt_file = "/etc/corosync/qnetd/nssdb/cluster-cluster1.crt"
+ crt_cmd = "test -f {crt_file} && rm -f {crt_file}".format(crt_file=crt_file)
+ crq_file = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.cluster1"
+ crq_cmd = "test -f {crq_file} && rm -f {crq_file}".format(crq_file=crq_file)
+ mock_run.assert_has_calls([
+ mock.call(crt_cmd, "qnetd-node"),
+ mock.call(crq_cmd, "qnetd-node")])
diff --git a/test/unittests/test_ratrace.py b/test/unittests/test_ratrace.py
new file mode 100644
index 0000000..6734b89
--- /dev/null
+++ b/test/unittests/test_ratrace.py
@@ -0,0 +1,132 @@
+import unittest
+from lxml import etree
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import cibconfig
+from crmsh.ui_context import Context
+from crmsh.ui_resource import RscMgmt
+from crmsh.ui_root import Root
+
+
+class TestRATrace(unittest.TestCase):
+ """Unit tests for enabling/disabling RA tracing."""
+
+ context = Context(Root())
+ factory = cibconfig.cib_factory
+
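+ # _push_state/_pop_state snapshot and restore the shared cib_factory so each test sees a clean CIB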
+ def setUp(self):
+ self.factory._push_state()
+
+ def tearDown(self):
+ self.factory._pop_state()
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_resource(self, mock_error):
+ """Check setting RA tracing for a resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy"/>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the resource.
+ RscMgmt()._trace_resource(self.context, obj.obj_id, obj, '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0', 'r1-stop-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-stop-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the resource.
+ RscMgmt()._untrace_resource(self.context, obj.obj_id, obj)
+ self.assertEqual(obj.node.xpath('operations/op/@id'), [])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op(self, mock_error):
+ """Check setting RA tracing for a specific operation."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'invalid-op')
+ self.assertEqual(str(err.exception), "Operation invalid-op not found in r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_new(self, mock_error):
+ """Check setting RA tracing for an operation that is not in CIB."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace a regular operation that is not yet defined in CIB. The request
+ # should succeed and introduce an op node for the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'start', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Try tracing the monitor operation in the same way. The request should
+ # get rejected because no explicit interval is specified.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(str(err.exception), "No monitor operation configured for r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_stateful(self, mock_error):
+ """Check setting RA tracing for an operation on a stateful resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor" role="Master"/>
+ <op id="r1-monitor-11" interval="11" name="monitor" role="Slave"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-11"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_interval(self, mock_error):
+ """Check setting RA tracing for an operation+interval."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'invalid-op', '10')
+ self.assertEqual(str(err.exception), "Operation invalid-op with interval 10 not found in r1")
diff --git a/test/unittests/test_report_collect.py b/test/unittests/test_report_collect.py
new file mode 100644
index 0000000..faec951
--- /dev/null
+++ b/test/unittests/test_report_collect.py
@@ -0,0 +1,591 @@
+from subprocess import TimeoutExpired
+from crmsh.report import collect, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestCollect(unittest.TestCase):
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log_no_config(self, mock_isfile, mock_warning):
+ mock_isfile.side_effect = [False, False, False]
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log"),
+ mock.call("/var/log/pacemaker.log")
+ ])
+ mock_warning.assert_called_once_with("No valid pacemaker log file found")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log(self, mock_isfile, mock_read, mock_warning):
+ mock_isfile.return_value = True
+ mock_read.return_value = """
+# has been enabled, those as well). This log is of more use to developers and
+# advanced system administrators, and when reporting problems.
+PCMK_logfile=/var/log/pacemaker/pacemaker.log
+
+# Set the permissions on the above log file to owner/group read/write
+ """
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "/var/log/pacemaker/pacemaker.log")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log")
+ ])
+ mock_read.assert_called_once_with(constants.PCMKCONF)
+
+ @mock.patch('crmsh.report.utils.dump_logset')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.collect.get_pcmk_log')
+ @mock.patch('crmsh.report.collect.get_corosync_log')
+ def test_collect_ha_logs(self, mock_corosync_log, mock_get_log, mock_isfile, mock_dump):
+ mock_corosync_log.return_value = "/var/log/cluster/corosync.log"
+ mock_get_log.return_value = "/var/pacemaker.log"
+ mock_isfile.side_effect = [True, True]
+ mock_ctx_inst = mock.Mock(extra_log_list=[])
+
+ collect.collect_ha_logs(mock_ctx_inst)
+
+ mock_get_log.assert_called_once_with()
+ mock_isfile.assert_has_calls([
+ mock.call(mock_get_log.return_value),
+ mock.call(mock_corosync_log.return_value)
+ ])
+ mock_dump.assert_has_calls([
+ mock.call(mock_ctx_inst, mock_get_log.return_value),
+ mock.call(mock_ctx_inst, mock_corosync_log.return_value)
+ ])
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log_not_exist(self, mock_conf, mock_exists, mock_warning):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_exists.return_value = False
+ self.assertEqual(collect.get_corosync_log(), "")
+
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log(self, mock_conf, mock_exists, mock_get_value):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_get_value.return_value = "/var/log/cluster/corosync.log"
+ mock_exists.return_value = True
+ self.assertEqual(collect.get_corosync_log(), mock_get_value.return_value)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_collect_journal_logs(self, mock_ts_to_str, mock_get_cmd_output,
+ mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.JOURNAL_F,
+ constants.JOURNAL_PCMK_F,
+ constants.JOURNAL_COROSYNC_F,
+ constants.JOURNAL_SBD_F
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1234, to_time=5678, work_dir="/opt/work")
+ mock_ts_to_str.side_effect = ["10.10", "10.12"]
+ mock_get_cmd_output.side_effect = ["data_default", "data_pacemaker", "data_corosync", "data_sbd"]
+ collect.collect_journal_logs(mock_ctx_inst)
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+ cmd_list = [
+ 'journalctl -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u pacemaker -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u corosync -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u sbd -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2'
+ ]
+ mock_get_cmd_output.assert_has_calls([
+ mock.call(cmd_list[0]),
+ mock.call(cmd_list[1]),
+ mock.call(cmd_list[2]),
+ mock.call(cmd_list[3]),
+ ])
+ mock_logger.debug2.assert_has_calls([
+ mock.call("Collect journal logs since: 10.10 until: 10.12"),
+ mock.call(f"Running command: {cmd_list[0]}"),
+ mock.call(f"Running command: {cmd_list[1]}"),
+ mock.call(f"Running command: {cmd_list[2]}"),
+ mock.call(f"Running command: {cmd_list[3]}"),
+ ])
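+ # The "jounal" spelling below matches the exact messages the collect module is expected to log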
+ mock_logger.debug.assert_has_calls([
+ mock.call(f"Dump jounal log for default into {constants.JOURNAL_F}"),
+ mock.call(f"Dump jounal log for pacemaker into {constants.JOURNAL_PCMK_F}"),
+ mock.call(f"Dump jounal log for corosync into {constants.JOURNAL_COROSYNC_F}"),
+ mock.call(f"Dump jounal log for sbd into {constants.JOURNAL_SBD_F}")
+ ])
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process_empty(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, None, None)
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 0\n")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.side_effect = [
+ (0, "1000", None),
+ (0, "data1", None),
+ (0, "data2", None)
+ ]
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 1\npid: 1000 comm: data1\ndata2\n\n")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("ps aux|awk '$8 ~ /^D/{print $2}'"),
+ mock.call('cat /proc/1000/comm'),
+ mock.call('cat /proc/1000/stack')
+ ])
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_config(self, mock_exists, mock_debug):
+ mock_exists.return_value = False
+ mock_ctx_inst = mock.Mock()
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_debug.assert_called_once_with(f"SBD config file {constants.SBDCONF} does not exist")
+
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_cmd(self, mock_exists, mock_copy, mock_which):
+ mock_exists.return_value = True
+ mock_which.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info(self, mock_exists, mock_copy, mock_which, mock_run, mock_debug, mock_open_file, mock_real_path):
+ mock_real_path.return_value = constants.SBD_F
+ mock_exists.return_value = True
+ mock_which.return_value = True
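+ # Route builtins.open through mock_open so the file writes can be asserted below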
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_run.return_value = "data"
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+
+ collect.collect_sbd_info(mock_ctx_inst)
+
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+ mock_open_file.assert_called_once_with(f"{mock_ctx_inst.work_dir}/{constants.SBD_F}", "w")
+ file_handle.write.assert_has_calls([
+ mock.call("\n\n#=====[ Command ] ==========================#\n"),
+ mock.call("# . /etc/sysconfig/sbd;export SBD_DEVICE;sbd dump;sbd list\n"),
+ mock.call("data")
+ ])
+ mock_debug.assert_called_once_with(f"Dump SBD config file into {constants.SBD_F}")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_pe_to_dot(self, mock_run, mock_warning):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, None)
+ collect.pe_to_dot("/opt/pe-input-0.bz2")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_simulate -D /opt/pe-input-0.dot -x /opt/pe-input-0.bz2")
+ mock_warning.assert_called_once_with('pe_to_dot: %s -> %s failed', '/opt/pe-input-0.bz2', '/opt/pe-input-0.dot')
+
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs_no_found(self, mock_logger, mock_find_files):
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir")
+ mock_find_files.return_value = []
+ collect.collect_pe_inputs(mock_ctx_inst)
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call("No PE file found for the giving time")
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.pe_to_dot')
+ @mock.patch('os.symlink')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs(self, mock_logger, mock_find_files, mock_mkdir, mock_symlink, mock_to_dot, mock_real_path):
+ mock_real_path.return_value = "pe_dir"
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir", work_dir="/opt/work_dir", speed_up=False)
+ mock_find_files.return_value = ["/opt/pe_dir/pe_input1", "/opt/pe_dir/pe_input2"]
+
+ collect.collect_pe_inputs(mock_ctx_inst)
+
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call(f"Found 2 PE files in {mock_ctx_inst.pe_dir}"),
+ ])
+ mock_logger.debug.assert_called_once_with(f"Dump PE files into pe_dir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ def test_collect_sys_stats(self, mock_run, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.SYSSTATS_F
+ mock_run.side_effect = [
+ "data_hostname", "data_uptime", "data_ps_axf", "data_ps_auxw",
+ "data_top", "data_ip_addr", "data_ip_link", "data_ip_show", "data_iscsi",
+ "data_lspci", "data_mount", "data_cpuinfo", TimeoutExpired("df", 5)
+ ]
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sys_stats(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with(f"Timeout while running command: df")
+ mock_run.assert_has_calls([
+ mock.call("hostname", timeout=5),
+ mock.call("uptime", timeout=5),
+ mock.call("ps axf", timeout=5),
+ mock.call("ps auxw", timeout=5),
+ mock.call("top -b -n 1", timeout=5),
+ mock.call("ip addr", timeout=5),
+ mock.call("ip -s link", timeout=5),
+ mock.call("ip n show", timeout=5),
+ mock.call("lsscsi", timeout=5),
+ mock.call("lspci", timeout=5),
+ mock.call("mount", timeout=5),
+ mock.call("cat /proc/cpuinfo", timeout=5),
+ mock.call("df", timeout=5)
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_distro_info')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.uname')
+ @mock.patch('crmsh.report.utils.Package')
+ def test_collect_sys_info(self, mock_package, mock_uname, mock_str2file, mock_get_distro, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.SYSINFO_F
+ mock_package_inst = mock.Mock()
+ mock_package.return_value = mock_package_inst
+ mock_package_inst.version = mock.Mock(return_value="version_data\n")
+ mock_package_inst.verify = mock.Mock(return_value="verify_data\n")
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work")
+ mock_uname.return_value = ("Linux", None, "4.5", None, "x86_64")
+ mock_get_distro.return_value = "suse"
+
+ collect.collect_sys_info(mock_ctx_inst)
+
+ mock_package.assert_called_once_with(constants.PACKAGES)
+ mock_str2file.assert_called_once_with('##### System info #####\nPlatform: Linux\nKernel release: 4.5\nArchitecture: x86_64\nDistribution: suse\n\n##### Installed cluster related packages #####\nversion_data\n\n\n##### Verification output of packages #####\nverify_data\n', '/opt/work/sysinfo.txt')
+ mock_debug.assert_called_once_with(f"Dump packages and platform info into {constants.SYSINFO_F}")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.dump_runtime_state')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_running(self, mock_service, mock_dump_state, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = True
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_stopped(self, mock_service, mock_copy2, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", cib_dir="/var/log/pacemaker/cib")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ @mock.patch('os.path.isfile')
+ def test_consume_cib_in_workdir(self, mock_isfile, mock_run, mock_str2file):
+ mock_isfile.return_value = True
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["data1", "data2"]
+ collect.consume_cib_in_workdir("/workdir")
+ mock_isfile.assert_called_once_with(f"/workdir/{constants.CIB_F}")
+ mock_run_inst.get_stdout_or_raise_error.assert_has_calls([
+ mock.call('CIB_file=/workdir/cib.xml crm configure show'),
+ mock.call('crm_verify -V -x /workdir/cib.xml')
+ ])
+ mock_str2file.assert_has_calls([
+ mock.call("data1", f"/workdir/{constants.CONFIGURE_SHOW_F}"),
+ mock.call("data2", f"/workdir/{constants.CRM_VERIFY_F}")
+ ])
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces_return(self, mock_run, mock_logger):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"])
+ collect.collect_ratraces(mock_ctx_inst)
+ mock_logger.debug2.assert_not_called()
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces(self, mock_run, mock_find, mock_mkdirp, mock_copy, mock_logger, mock_real_path):
+ mock_real_path.return_value = "/var/log"
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "INFO: Trace for .* is written to /var/log/cluster/pacemaker.log"
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, data, None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work")
+ mock_find.return_value = ["/var/log/cluster"]
+
+ collect.collect_ratraces(mock_ctx_inst)
+
+ mock_logger.debug2.assert_called_once_with('Looking for RA trace files in "%s"', '/var/log/cluster')
+ mock_logger.debug.assert_called_once_with(f'Dump RA trace files into {mock_real_path.return_value}')
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_lsof_ocfs2_device(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mount_data = """
+/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
+tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64)
+/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non
+ """
+ mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)]
+ res = collect.lsof_ocfs2_device()
+ self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("mount"),
+ mock.call("lsof /dev/sda7")
+ ])
+
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('os.path.exists')
+ @mock.patch('shutil.which')
+ def test_ocfs2_commands_output(self, mock_which, mock_exists, mock_run):
+ mock_which.side_effect = [False] * 5 + [True, True]
+ mock_exists.return_value = False
+ mock_run.return_value = "data"
+ res = collect.ocfs2_commands_output()
+ self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mount\ndata")
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_error(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, "error")
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('Failed to run "mounted.ocfs2 -d": error', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_no_found(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('No ocfs2 partitions found', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.ocfs2_commands_output')
+ @mock.patch('crmsh.report.collect.lsof_ocfs2_device')
+ @mock.patch('crmsh.report.collect.dump_D_process')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info(self, mock_run, mock_str2file, mock_debug2, mock_D, mock_lsof, mock_output, mock_real_path):
+ mock_real_path.return_value = constants.OCFS2_F
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "line1\nline2", None)
+ mock_D.return_value = "data_D\n"
+ mock_lsof.return_value = "data_lsof\n"
+ mock_output.return_value = "data_output\n"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('data_D\ndata_lsof\ndata_output\n', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ def test_collect_dlm_info(self, mock_which, mock_get_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.DLM_DUMP_F
+ mock_which.return_value = True
+ ls_data = """
+dlm lockspaces
+name 08BB5A6A38EE491DBF63627EEB57E558
+id 0x19041a12
+ """
+ mock_get_output.side_effect = [ls_data, "lockdebug data", "dump data"]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ collect.collect_dlm_info(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump DLM information into {constants.DLM_DUMP_F}")
+
+ @mock.patch('crmsh.report.collect.dump_core_info')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_coredump_info(self, mock_find, mock_basename, mock_warning, mock_dump):
+ mock_ctx_inst = mock.Mock(cores_dir_list=['/var/lib/pacemaker/cores'], work_dir="/opt/work_dir")
+ mock_find.return_value = ["/var/lib/pacemaker/cores/core.1"]
+ mock_basename.return_value = "core.1"
+ collect.collect_coredump_info(mock_ctx_inst)
+ mock_dump.assert_called_once_with("/opt/work_dir", mock_find.return_value)
+ mock_warning.assert_called_once_with(f"Found coredump file: {mock_find.return_value}")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core_not_found(self, mock_run):
+ mock_run().get_stdout_stderr.return_value = (0, "Core not found", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Cannot find the program path for core core.1", res)
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "Core was generated by `/usr/sbin/crm_mon'", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Core core.1 was generated by /usr/sbin/crm_mon", res)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.which')
+ def test_dump_core_info_no_gdb(self, mock_which, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = False
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_logger.warning.assert_called_once_with("Please install gdb to get more info for coredump files")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.find_binary_path_for_core')
+ @mock.patch('shutil.which')
+ def test_dump_core_info(self, mock_which, mock_find_binary, mock_str2file, mock_debug2, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = True
+ mock_find_binary.return_value = "data"
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_str2file.assert_called_once_with("data\n\nPlease utilize the gdb and debuginfo packages to obtain more detailed information locally", f"/opt/workdir/{constants.COREDUMP_F}")
+ mock_debug2(f"Dump coredump info into {constants.COREDUMP_F}")
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('pwd.getpwnam')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_collect_perms_state(self, mock_isdir, mock_stat, mock_getpwnam, mock_str2file):
+ mock_ctx_inst = mock.Mock(
+ pcmk_lib_dir="/var/lib/pacemaker",
+ pe_dir="/var/lib/pacemaker/pe",
+ cib_dir="/var/lib/pacemaker/cib",
+ work_dir="/opt/work_dir"
+ )
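+ # The three dirs: lib dir missing, pe dir ownership matches, cib dir owned by another uid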
+ mock_isdir.side_effect = [False, True, True]
+ mock_stat_inst_pe = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat_inst_cib = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat.side_effect = [mock_stat_inst_pe, mock_stat_inst_cib]
+ mock_getpwnam_inst_pe = mock.Mock(pw_uid=1000, pw_gid=1000)
+ mock_getpwnam_inst_cib = mock.Mock(pw_uid=1001, pw_gid=1000)
+ mock_getpwnam.side_effect = [mock_getpwnam_inst_pe, mock_getpwnam_inst_cib]
+
+ collect.collect_perms_state(mock_ctx_inst)
+
+ data = "##### Check perms for /var/lib/pacemaker: /var/lib/pacemaker is not a directory or does not exist\n##### Check perms for /var/lib/pacemaker/pe: OK\n##### Check perms for /var/lib/pacemaker/cib: Permissions or ownership for /var/lib/pacemaker/cib are incorrect\n"
+ mock_str2file.assert_called_once_with(data, f"/opt/work_dir/{constants.PERMISSIONS_F}")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.get_dc')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_dump_runtime_state(self, mock_run, mock_str2file, mock_debug, mock_get_dc, mock_this_node, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.CRM_MON_F,
+ constants.CIB_F,
+ constants.MEMBERSHIP_F,
+ "workdir"
+ ]
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["crm_mon_data", "cib_data", "crm_node_data"]
+ mock_get_dc.return_value = "node1"
+ mock_this_node.return_value = "node1"
+ collect.dump_runtime_state("/opt/workdir")
+ mock_debug.assert_has_calls([
+ mock.call(f"Dump cluster state into {constants.CRM_MON_F}"),
+ mock.call(f"Dump CIB contents into {constants.CIB_F}"),
+ mock.call(f"Dump members of this partition into {constants.MEMBERSHIP_F}"),
+ mock.call(f"Current DC is node1; Touch file 'DC' in workdir")
+ ])
+
+ @mock.patch('shutil.copytree')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.corosync.conf')
+ def test_dump_configurations(self, mock_corosync_conf, mock_isfile, mock_copy2, mock_isdir, mock_basename, mock_copytree):
+ mock_corosync_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_isfile.side_effect = [True, True, False, True]
+ mock_isdir.return_value = True
+ mock_basename.return_value = "drbd.d"
+ collect.dump_configurations("/opt/workdir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_corosync_blackbox(self, mock_find_files, mock_get_cmd_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.COROSYNC_RECORDER_F
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ mock_find_files.return_value = ["/var/lib/corosync/fdata.1"]
+ mock_get_cmd_output.return_value = "data"
+ collect.collect_corosync_blackbox(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump corosync blackbox info into {constants.COROSYNC_RECORDER_F}")
diff --git a/test/unittests/test_report_core.py b/test/unittests/test_report_core.py
new file mode 100644
index 0000000..dd6e842
--- /dev/null
+++ b/test/unittests/test_report_core.py
@@ -0,0 +1,551 @@
+from crmsh import config
+from crmsh.report import core, constants, utils, collect
+import crmsh.log
+
+import sys
+import argparse
+import unittest
+from unittest import mock
+
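+# Note: stacked mock.patch decorators are applied bottom-up, so each test
+# method receives its mock arguments in the reverse of the decorator order.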
+
+class TestCapitalizedHelpFormatter(unittest.TestCase):
+ def setUp(self):
+ # Initialize the ArgumentParser with the CapitalizedHelpFormatter
+ self.parser = argparse.ArgumentParser(
+ formatter_class=core.CapitalizedHelpFormatter,
+ usage="usage: test"
+ )
+ self.parser.add_argument('--test', help='Test option')
+
+ def test_usage(self):
+ # Test that the usage is capitalized
+ usage_text = self.parser.format_usage()
+ self.assertTrue(usage_text.startswith('Usage: '))
+
+ def test_section_heading(self):
+ # Test that section headings are capitalized
+ section_text = self.parser.format_help()
+ self.assertTrue('Option' in section_text)
+
+
+class TestContext(unittest.TestCase):
+
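+ # Patches declared on setUp are only active while setUp itself runs;
+ # they shape the Context instance that the test methods then exercise.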
+ @mock.patch('crmsh.report.utils.parse_to_timestamp')
+ @mock.patch('crmsh.report.utils.now')
+ @mock.patch('crmsh.report.core.config')
+ def setUp(self, mock_config, mock_now, mock_parse_to_timestamp):
+ mock_config.report = mock.Mock(
+ from_time="20230101",
+ compress=False,
+ collect_extra_logs="file1 file2",
+ remove_exist_dest=False,
+ single_node=False
+ )
+ mock_now.return_value = "12345"
+ mock_parse_to_timestamp.return_value = "54321"
+ self.context = core.Context()
+ self.context.load()
+
+ def test_attribute_setting(self):
+ self.context.name = "value"
+ self.assertEqual(self.context.name, "value")
+ self.context["age"] = 19
+ self.assertEqual(self.context.age, 19)
+ self.context.extra_log_list = ["file3", "file2"]
+ self.assertEqual(len(self.context.extra_log_list), 3)
+
+ @mock.patch('json.dumps')
+ def test_str(self, mock_dumps):
+ mock_dumps.return_value = "json str"
+ self.assertEqual(self.context.name, "crm_report")
+ self.assertEqual(self.context.from_time, "54321")
+ self.assertEqual(str(self.context), "json str")
+
+
+class TestRun(unittest.TestCase):
+
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dest_not_exist(self, mock_isdir):
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Directory /opt/test does not exist", str(err.exception))
+
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_filename_not_sane(self, mock_isdir, mock_basename, mock_sane):
+ mock_isdir.return_value = True
+ mock_sane.return_value = False
+ mock_basename.return_value = "report*"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report*")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("report* is invalid file name", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('shutil.rmtree')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists_rmtree(self, mock_isdir, mock_basename, mock_sane, mock_rmtree, mock_pick):
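+ # Two queued isdir results: presumably the parent directory check,
+ # then the existing destination itself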
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=True)
+ core.process_dest(mock_ctx_inst)
+ mock_rmtree.assert_called_once_with("/opt/test/report")
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists(self, mock_isdir, mock_basename, mock_sane, mock_pick):
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=False)
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Destination directory /opt/test/report exists, please cleanup or use -Z option", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.utils.now')
+ def test_process_dest(self, mock_now, mock_isdir, mock_basename, mock_is_sane, mock_pick):
+ mock_now.return_value = "Mon-28-Aug-2023"
+ mock_isdir.side_effect = [True, False]
+ mock_is_sane.return_value = True
+ mock_basename.return_value = f"report.{mock_now.return_value}"
+ mock_ctx_inst = mock.Mock(dest=None, no_compress=False, compress_suffix=".bz2", name="report")
+
+ core.process_dest(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.dest_dir, ".")
+ mock_is_sane.assert_called_once_with(mock_basename.return_value)
+ self.assertEqual(mock_ctx_inst.dest_path, "./report.Mon-28-Aug-2023.tar.bz2")
+
+ @mock.patch('crmsh.report.core.pick_first_compress')
+ def test_pick_compress_prog(self, mock_pick):
+ mock_pick.return_value = (None, None)
+ mock_ctx_inst = mock.Mock()
+ core.pick_compress_prog(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.compress_prog, "cat")
+
+ @mock.patch('shutil.which')
+ def test_pick_first_compress_return(self, mock_which):
+ mock_which.return_value = True
+ prog, ext = core.pick_first_compress()
+ self.assertEqual(prog, "gzip")
+ self.assertEqual(ext, ".gz")
+ mock_which.assert_called_once_with("gzip")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_pick_first_compress(self, mock_which, mock_warn):
+ mock_which.side_effect = [False, False, False]
+ prog, ext = core.pick_first_compress()
+ self.assertIsNone(prog)
+ self.assertIsNone(ext)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('logging.Logger.info')
+ def test_finalword(self, mock_info, mock_get_timespan):
+ mock_ctx_inst = mock.Mock(dest_path="./crm_report-Tue-15-Aug-2023.tar.bz2", node_list=["node1", "node2"])
+ mock_get_timespan.return_value = "2023-08-14 18:17 - 2023-08-15 06:17"
+ core.finalword(mock_ctx_inst)
+ mock_info.assert_has_calls([
+ mock.call(f"The report is saved in {mock_ctx_inst.dest_path}"),
+ mock.call(f"Report timespan: {mock_get_timespan.return_value}"),
+ mock.call(f"Including nodes: {' '.join(mock_ctx_inst.node_list)}"),
+ mock.call("Thank you for taking time to create this report")
+ ])
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir_collector(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir", me="node1")
+ mock_collector.return_value = True
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir")
+ mock_collector.return_value = False
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.load_from_crmsh_config')
+ def test_load_context_attributes(self, mock_load, mock_isdir):
+ mock_ctx_inst = mock.Mock(cib_dir="/var/lib/pacemaker/cib")
+ mock_isdir.return_value = True
+
+ core.load_context_attributes(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.pcmk_lib_dir, "/var/lib/pacemaker")
+ self.assertEqual(mock_ctx_inst.cores_dir_list, ["/var/lib/pacemaker/cores", constants.COROSYNC_LIB])
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ crm_daemon_dir="/usr/lib/pacemaker",
+ pe_state_dir="/var/lib/pacemaker/pe"
+ )
+ mock_isdir.side_effect = [True, True, True]
+ mock_ctx_inst = mock.Mock()
+
+ core.load_from_crmsh_config(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.cib_dir, mock_config.path.crm_config)
+ self.assertEqual(mock_ctx_inst.pcmk_exec_dir, mock_config.path.crm_daemon_dir)
+ self.assertEqual(mock_ctx_inst.pe_dir, mock_config.path.pe_state_dir)
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config_exception(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ )
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock()
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.load_from_crmsh_config(mock_ctx_inst)
+ self.assertEqual(f"Cannot find CIB directory", str(err.exception))
+
+ def test_adjust_verbosity_debug(self):
+ mock_ctx_inst = mock.Mock(debug=1)
+ core.adjust_verbosity(mock_ctx_inst)
+
+ def test_adjust_verbosity(self):
+ mock_ctx_inst = mock.Mock(debug=0)
+ config.core.debug = True
+ core.adjust_verbosity(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('json.loads')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_load_context(self, mock_logger, mock_json_loads, mock_config, mock_verbosity):
+ class Context:
+ def __str__(self):
+ return "data"
+ def __setitem__(self, key, value):
+ self.__dict__[key] = value
+
+ sys.argv = ["arg1", "arg2", "arg3"]
+ mock_config.report = mock.Mock(verbosity=None)
+ mock_json_loads.return_value = {"key": "value", "debug": "true"}
+ mock_ctx_inst = Context()
+ core.load_context(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with("Loading context from collector: data")
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.process_arguments')
+ @mock.patch('crmsh.utils.check_empty_option_value')
+ @mock.patch('crmsh.report.core.add_arguments')
+ def test_parse_arguments(self, mock_parse, mock_check_space, mock_process, mock_verbosity):
+ mock_args = mock.Mock(option1="value1")
+ mock_parse.return_value = mock_args
+ mock_ctx_inst = mock.Mock()
+
+ core.parse_arguments(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.option1, "value1")
+
+ mock_check_space.assert_called_once_with(mock_args)
+ mock_process.assert_called_once_with(mock_ctx_inst)
+
+ def test_is_collector(self):
+ sys.argv = ["report", "__collector"]
+ self.assertEqual(core.is_collector(), True)
+
+ @mock.patch('crmsh.report.core.push_data')
+ @mock.patch('crmsh.report.core.collect_logs_and_info')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl_collector(self, mock_context, mock_collector, mock_load, mock_setup, mock_collect_info, mock_push):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
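+ # run_impl consults is_collector twice, hence the two queued values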
+ mock_collector.side_effect = [True, True]
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_collect_info.assert_called_once_with(mock_ctx_inst)
+ mock_push.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.process_results')
+ @mock.patch('crmsh.report.core.collect_for_nodes')
+ @mock.patch('crmsh.report.core.find_ssh_user')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context_attributes')
+ @mock.patch('crmsh.report.core.parse_arguments')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl(self, mock_context, mock_collector, mock_parse, mock_load, mock_setup, mock_find_ssh, mock_collect, mock_process_results):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
+ mock_collector.side_effect = [False, False]
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_parse.assert_called_once_with(mock_ctx_inst)
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_find_ssh.assert_called_once_with(mock_ctx_inst)
+ mock_collect.assert_called_once_with(mock_ctx_inst)
+ mock_process_results.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception_generic(self, mock_run, mock_log_error):
+ mock_run.side_effect = utils.ReportGenericError("error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_log_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception(self, mock_run, mock_print):
+ mock_run.side_effect = UnicodeDecodeError("encoding", b'', 0, 1, "error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_print.assert_called_once_with()
+
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments_help(self, mock_argparse, mock_formatter):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+
+ with self.assertRaises(SystemExit):
+ core.add_arguments()
+
+ mock_argparse_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments(self, mock_argparse, mock_formatter, mock_config):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=False, debug=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+ mock_config.report = mock.Mock(verbosity=False)
+
+ core.add_arguments()
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.to_ascii')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_push_data(self, mock_sh_utils, mock_to_ascii, mock_logger):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, "data", "error")
+ mock_to_ascii.return_value = "error"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", main_node="node1", me="node1")
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.push_data(mock_ctx_inst)
+ self.assertEqual("error", str(err.exception))
+
+ mock_logger.debug2.assert_called_once_with("Pushing data from node1:/opt/work_dir to node1")
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with("cd /opt/work_dir/.. && tar -h -c node1", raw=True)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('shutil.move')
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ def test_process_results_no_compress(self, mock_analyze, mock_create, mock_move, mock_final):
+ mock_ctx_inst = mock.Mock(speed_up=True, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=True)
+ core.process_results(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+ mock_move.assert_called_once_with(mock_ctx_inst.work_dir, mock_ctx_inst.dest_dir)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('crmsh.report.core.sh.cluster_shell')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ @mock.patch('crmsh.report.utils.do_sanitize')
+ def test_process_results(self, mock_sanitize, mock_analyze, mock_create, mock_debug2, mock_run, mock_final):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error = mock.Mock()
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=False, dest="report", compress_prog="tar", compress_suffix=".bz2")
+ core.process_results(mock_ctx_inst)
+ mock_sanitize.assert_called_once_with(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.getmembers')
+ @mock.patch('multiprocessing.cpu_count')
+ @mock.patch('multiprocessing.Pool')
+ def test_collect_logs_and_info(self, mock_pool, mock_cpu_count, mock_getmember, mock_print):
+ mock_cpu_count.return_value = 4
+ mock_pool_inst = mock.Mock()
+ mock_pool.return_value = mock_pool_inst
+ mock_pool_inst.apply_async = mock.Mock()
+ mock_async_inst1 = mock.Mock()
+ mock_async_inst2 = mock.Mock()
+ mock_pool_inst.apply_async.side_effect = [mock_async_inst1, mock_async_inst2]
+ mock_async_inst1.get = mock.Mock()
+ mock_async_inst2.get = mock.Mock(side_effect=ValueError)
+ mock_pool_inst.close = mock.Mock()
+ mock_pool_inst.join = mock.Mock()
+ mock_getmember.return_value = [("collect_func1", None), ("collect_func2", None)]
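+ # Attach stub functions to the collect module so the names returned by
+ # the mocked getmembers resolve when they are dispatched to the pool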
+ collect.collect_func1 = mock.Mock()
+ collect.collect_func2 = mock.Mock()
+ mock_ctx_inst = mock.Mock()
+
+ core.collect_logs_and_info(mock_ctx_inst)
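+ # The worker pool is expected to be sized cpu_count - 1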
+ mock_pool.assert_called_once_with(3)
+
+ @mock.patch('multiprocessing.Process')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.report.core.start_collector')
+ def test_collect_for_nodes(self, mock_start_collector, mock_info, mock_process):
+ mock_ctx_inst = mock.Mock(
+ node_list=["node1", "node2"],
+ ssh_askpw_node_list=["node2"],
+ ssh_user=""
+ )
+ mock_process_inst = mock.Mock()
+ mock_process.return_value = mock_process_inst
+ core.collect_for_nodes(mock_ctx_inst)
+
+ def test_process_arguments_value_error(self):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=100)
+ with self.assertRaises(ValueError) as err:
+ core.process_arguments(mock_ctx_inst)
+ self.assertEqual("The start time must be before the finish time", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_exception(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=[])
+ mock_list_nodes.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual("Could not figure out a list of nodes; is this a cluster node?", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_single(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=True, me="node1")
+ core.process_node_list(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list(self, mock_list_nodes, mock_ping, mock_error):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=False, me="node1")
+ mock_ping.side_effect = ValueError("error")
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.node_list, ["node1"])
+
+ @mock.patch('crmsh.report.core.process_node_list')
+ @mock.patch('crmsh.report.core.process_dest')
+ def test_process_arguments(self, mock_dest, mock_node_list):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=150)
+ core.process_arguments(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user_not_found(self, mock_get_sudoer, mock_getuser, mock_check_ssh, mock_logger):
+ mock_get_sudoer.return_value = ""
+ mock_getuser.return_value = "user2"
+ mock_check_ssh.return_value = True
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"], me="node1")
+ core.find_ssh_user(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with(f"passwordless ssh to node(s) ['node2'] does not work")
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user(self, mock_get_sudoer, mock_getuser, mock_this_node, mock_check_ssh, mock_debug, mock_warn, mock_debug2):
+ mock_get_sudoer.return_value = "user1"
+ mock_getuser.return_value = "user2"
+ mock_this_node.return_value = "node1"
+ mock_check_ssh.return_value = False
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"])
+ core.find_ssh_user(mock_ctx_inst)
+ self.assertEqual("sudo", mock_ctx_inst.sudo)
+ self.assertEqual("user1", mock_ctx_inst.ssh_user)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_start_collector_return(self, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, '', None)
+ mock_ctx_inst = mock.Mock(me="node1")
+ core.start_collector("node1", mock_ctx_inst)
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with(f"{constants.BIN_COLLECTOR} '{mock_ctx_inst}'")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector_warn(self, mock_this_node, mock_sh, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (1, '', "error")
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ core.start_collector("node1", mock_ctx_inst)
+ mock_warn.assert_called_once_with("error")
+
+ @mock.patch('ast.literal_eval')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector(self, mock_this_node, mock_sh_utils, mock_sh, mock_eval):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
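+ # Simulate collector output that carries the compressed-data marker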
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (0, f"line1\n{constants.COMPRESS_DATA_FLAG}data", None)
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ mock_eval.return_value = "data"
+ core.start_collector("node1", mock_ctx_inst)
diff --git a/test/unittests/test_report_utils.py b/test/unittests/test_report_utils.py
new file mode 100644
index 0000000..aa28563
--- /dev/null
+++ b/test/unittests/test_report_utils.py
@@ -0,0 +1,862 @@
+import sys
+import datetime
+from crmsh import config
+from crmsh import utils as crmutils
+from crmsh.report import utils, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestPackage(unittest.TestCase):
+
+ @mock.patch('crmsh.report.utils.get_pkg_mgr')
+ def setUp(self, mock_get_pkg_mgr):
+ mock_get_pkg_mgr.side_effect = [None, "rpm", "deb"]
+ self.inst_none = utils.Package("xxx1 xxx2")
+ self.inst = utils.Package("rpm1 rpm2")
+ self.inst_deb = utils.Package("deb1 deb2")
+
+ def test_version_return(self):
+ res = self.inst_none.version()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.pkg_ver_rpm')
+ def test_version(self, mock_ver_rpm):
+ mock_ver_rpm.return_value = "version1"
+ res = self.inst.version()
+ self.assertEqual(res, "version1")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "rpm1-4.5.0\nrpm2 not installed"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst.pkg_ver_rpm()
+ self.assertEqual(res, "rpm1-4.5.0")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "deb1-4.5.0\nno packages found"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst_deb.pkg_ver_deb()
+ self.assertEqual(res, "deb1-4.5.0")
+
+ def test_verify_return(self):
+ res = self.inst_none.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.verify_rpm')
+ def test_verify(self, mock_verify_rpm):
+ mock_verify_rpm.return_value = ""
+ res = self.inst.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed","")
+ res = self.inst.verify_rpm()
+ self.assertEqual(res, "verify data")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed","")
+ res = self.inst_deb.verify_deb()
+ self.assertEqual(res, "verify data")
+
+
+class TestSanitizer(unittest.TestCase):
+
+ def setUp(self):
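+ # Three fixtures: sanitizing disabled, no sensitive patterns configured,
+ # and a fully configured sanitizer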
+ mock_ctx_inst_no_sanitize = mock.Mock(sanitize=False)
+ self.s_inst_no_sanitize = utils.Sanitizer(mock_ctx_inst_no_sanitize)
+
+ mock_ctx_inst_no_sanitize_set = mock.Mock(sensitive_regex_list=[])
+ self.s_inst_no_sanitize_set = utils.Sanitizer(mock_ctx_inst_no_sanitize_set)
+
+ mock_ctx_inst = mock.Mock(sanitize=True, work_dir="/opt", sensitive_regex_list=["test_patt"])
+ self.s_inst = utils.Sanitizer(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_return(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_warning):
+ mock_include.return_value = True
+ self.s_inst_no_sanitize.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_warning.assert_has_calls([
+ mock.call("Some PE/CIB/log files contain possibly sensitive data"),
+ mock.call("Using \"-s\" option can replace sensitive data")
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer._get_file_list_in_work_dir')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_get_file):
+ mock_include.return_value = True
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_get_file.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_no_sensitive_data(self, mock_load_cib, mock_parse, mock_extract, mock_include):
+ mock_include.return_value = False
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+
+ def test_include_sensitive_data(self):
+ res = self.s_inst._include_sensitive_data()
+ self.assertEqual(res, [])
+
+ @mock.patch('os.walk')
+ def test_get_file_list_in_work_dir(self, mock_walk):
+ mock_walk.return_value = [
+ ("/opt", [], ["file1", "file2"]),
+ ("/opt/dir1", [], ["file3"]),
+ ]
+ self.s_inst._get_file_list_in_work_dir()
+ self.assertEqual(self.s_inst.file_list_in_workdir, ['/opt/file1', '/opt/file2', '/opt/dir1/file3'])
+
+ @mock.patch('glob.glob')
+ def test_load_cib_from_work_dir_no_cib(self, mock_glob):
+ mock_glob.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"CIB file {constants.CIB_F} was not collected", str(err.exception))
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir_empty(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"File /opt/node1/{constants.CIB_F} is empty", str(err.exception))
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = "data"
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(self.s_inst.cib_data, "data")
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set_no_set(self, mock_logger):
+ config.report.sanitize_rule = ""
+ self.s_inst_no_sanitize_set._parse_sensitive_set()
+ self.assertEqual(self.s_inst_no_sanitize_set.sensitive_regex_set, set(utils.Sanitizer.DEFAULT_RULE_LIST))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(utils.Sanitizer.DEFAULT_RULE_LIST)}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set(self, mock_logger):
+ config.report.sanitize_rule = "passw.*"
+ self.s_inst._parse_sensitive_set()
+ self.assertEqual(self.s_inst.sensitive_regex_set, set(['test_patt', 'passw.*']))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(['test_patt', 'passw.*'])}")
+
+ def test_sanitize_return(self):
+ self.s_inst_no_sanitize.sanitize()
+
+ @mock.patch('crmsh.report.utils.write_to_file')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.Sanitizer._sub_sensitive_string')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_sanitize(self, mock_read, mock_sub, mock_debug, mock_write):
+ self.s_inst.file_list_in_workdir = ["file1", "file2"]
+ mock_read.side_effect = [None, "data"]
+ mock_sub.return_value = "replace_data"
+ self.s_inst.sanitize()
+ mock_debug.assert_called_once_with("Replace sensitive info for %s", "file2")
+
+ def test_extract_from_cib(self):
+ self.s_inst.cib_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ """
+ res = self.s_inst._extract_from_cib("passw.*")
+ self.assertEqual(res, ["qwertyui"])
+
+ def test_sub_sensitive_string(self):
+ data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="13356789876"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ This is my tel 13356789876
+ """
+ self.s_inst.sensitive_value_list_with_raw_option = ["13356789876"]
+ self.s_inst.sensitive_key_list = ["passw.*"]
+ self.s_inst.sensitive_value_list = ["qwertyui"]
+ res = self.s_inst._sub_sensitive_string(data)
+ expected_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="******"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="******"/>
+ </utilization>
+ This is my tel ******
+ """
+ self.assertEqual(res, expected_data)
+
+ @mock.patch('logging.Logger.warning')
+ def test_extract_sensitive_value_list_warn(self, mock_warn):
+ self.s_inst.sensitive_regex_set = set(["TEL:test"])
+ self.s_inst._extract_sensitive_value_list()
+ mock_warn.assert_called_once_with("For sanitize pattern TEL:test, option should be \"raw\"")
+
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_from_cib')
+ def test_extract_sensitive_value_list(self, mock_extract):
+ mock_extract.side_effect = [["123456"], ["qwertyui"]]
+ self.s_inst.sensitive_regex_set = set(["TEL:raw", "passw.*"])
+ self.s_inst._extract_sensitive_value_list()
+
+class TestUtils(unittest.TestCase):
+
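+ # sorted() is patched to reverse its input so the test controls the log
+ # ordering without depending on real sort keys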
+ @mock.patch('builtins.sorted', side_effect=lambda x, *args, **kwargs: x[::-1])
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.report.utils.is_our_log')
+ def test_arch_logs(self, mock_is_our_log, mock_glob, mock_logger, mock_timespan, mock_sorted):
+ mock_is_our_log.return_value = utils.LogType.GOOD
+ mock_glob.return_value = []
+ mock_ctx_inst = mock.Mock()
+ mock_timespan.return_value = "0101-0202"
+
+ return_list, log_type = utils.arch_logs(mock_ctx_inst, "file1")
+
+ self.assertEqual(return_list, ["file1"])
+ self.assertEqual(log_type, utils.LogType.GOOD)
+ mock_logger.debug2.assert_called_once_with("Found logs ['file1'] in 0101-0202")
+
+ @mock.patch('sys.stdout.flush')
+ @mock.patch('traceback.print_exc')
+ def test_print_traceback(self, mock_trace, mock_flush):
+ utils.print_traceback()
+ mock_trace.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_get_timespan_str(self, mock_ts_to_str):
+ mock_ctx_inst = mock.Mock(from_time=1691938980.0, to_time=1691982180.0)
+ mock_ts_to_str.side_effect = ["2023-08-13 23:03", "2023-08-14 11:03"]
+ res = utils.get_timespan_str(mock_ctx_inst)
+ self.assertEqual(res, "2023-08-13 23:03 - 2023-08-14 11:03")
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_get_cmd_output(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "stdout_data", "stderr_data")
+ res = utils.get_cmd_output("cmd")
+ self.assertEqual(res, "stdout_data\nstderr_data\n")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("cmd", timeout=None)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_empty(self, mock_read):
+ mock_read.return_value = None
+ mock_ctx_inst = mock.Mock()
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.EMPTY)
+ mock_read.assert_called_once_with("/opt/logfile")
+
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_irregular(self, mock_read, mock_log_format):
+ mock_read.return_value = "This is the log"
+ mock_ctx_inst = mock.Mock()
+ mock_log_format.return_value = None
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.IRREGULAR)
+ mock_read.assert_called_once_with("/opt/logfile")
+ mock_log_format.assert_called_once_with(mock_read.return_value)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_before(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1600, to_time=1800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.BEFORE_TIMESPAN)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_good(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1200, to_time=1800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.GOOD)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_after(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=200, to_time=800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.AFTER_TIMESPAN)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr_unknown(self, mock_which, mock_warning):
+ mock_which.side_effect = [False, False]
+ self.assertEqual(utils.get_pkg_mgr(), "")
+ mock_warning.assert_called_once_with("Unknown package manager!")
+
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr(self, mock_which):
+ mock_which.return_value = True
+ utils.get_pkg_mgr()
+ self.assertEqual(utils.get_pkg_mgr(), "rpm")
+
+ @mock.patch('os.walk')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_find_files_in_timespan(self, mock_isdir, mock_stat, mock_walk):
+ mock_isdir.side_effect = [True, False]
+ mock_stat.return_value = mock.Mock(st_ctime=1615)
+ mock_walk.return_value = [
+ ('/mock_dir', [], ['file1.txt', 'file2.txt'])
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1611, to_time=1620)
+
+ res = utils.find_files_in_timespan(mock_ctx_inst, ['/mock_dir', '/not_exist'])
+
+ expected_result = ['/mock_dir/file1.txt', '/mock_dir/file2.txt']
+ self.assertEqual(res, expected_result)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_return(self, mock_arch, mock_debug, mock_timespan):
+ mock_arch.return_value = [[], ""]
+ mock_ctx_inst = mock.Mock()
+ utils.dump_logset(mock_ctx_inst, "file")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_irregular(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.IRREGULAR]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+ utils.dump_logset(mock_ctx_inst, "file1")
+ mock_print.assert_called_once_with("file1", 0, 0)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_one(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_called_once_with("file1", 10, 20)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1", "file2", "file3"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.side_effect = ["data1\n", "data2\n", "data3\n"]
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_has_calls([
+ mock.call("file3", 10, 0),
+ mock.call("file2", 0, 0),
+ mock.call("file1", 0, 20),
+ ])
+ mock_str2file.assert_called_once_with("data1\ndata2\ndata3", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info(self, mock_debug2, mock_exists, mock_read):
+ mock_exists.return_value = True
+ mock_read.return_value = """
+VERSION_ID="20230629"
+PRETTY_NAME="openSUSE Tumbleweed"
+ANSI_COLOR="0;32"
+ """
+ res = utils.get_distro_info()
+ self.assertEqual(res, "openSUSE Tumbleweed")
+
+ @mock.patch('shutil.which')
+ @mock.patch('crmsh.report.utils.sh.LocalShell')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info_lsb(self, mock_debug2, mock_exists, mock_sh, mock_which):
+ mock_which.return_value = True
+ mock_exists.return_value = False
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_stdout_or_raise_error.return_value = "data"
+ res = utils.get_distro_info()
+ self.assertEqual(res, "Unknown")
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp_none(self, mock_get_timestamp):
+ mock_get_timestamp.side_effect = [None, None]
+ data = ["line1", "line2"]
+ self.assertIsNone(utils.find_first_timestamp(data, "file1"))
+ mock_get_timestamp.assert_has_calls([
+ mock.call("line1", "file1"),
+ mock.call("line2", "file1")
+ ])
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp(self, mock_get_timestamp):
+ mock_get_timestamp.return_value = 123456
+ data = ["line1", "line2"]
+ res = utils.find_first_timestamp(data, "file1")
+ self.assertEqual(res, 123456)
+ mock_get_timestamp.assert_called_once_with("line1", "file1")
+
+ def test_filter_lines(self):
+ data = """line1
+line2
+line3
+line4
+line5
+ """
+ res = utils.filter_lines(data, 2, 4)
+ self.assertEqual(res, 'line2\nline3\nline4\n')
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_none(self, mock_head, mock_parse):
+ mock_head.return_value = ["line1", "line2"]
+ mock_parse.side_effect = [None, None]
+ data = """line1
+line2
+ """
+ self.assertIsNone(utils.determin_log_format(data))
+
+ def test_determin_log_format_rfc5424(self):
+ data = """
+2003-10-11T22:14:15.003Z mymachine.example.com su
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "rfc5424")
+
+ def test_determin_log_format_syslog(self):
+ data = """
+Feb 12 18:30:08 15sp1-1 kernel:
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "syslog")
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_legacy(self, mock_head, mock_parse):
+ mock_head.return_value = ["Legacy 2003-10-11T22:14:15.003Z log"]
+ mock_parse.side_effect = [None, None, 123456]
+ data = """
+Legacy 2003-10-11T22:14:15.003Z log data log
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "legacy")
+ mock_parse.assert_has_calls([
+ mock.call("Legacy 2003-10-11T22:14:15.003Z log", quiet=True),
+ mock.call("Legacy", quiet=True),
+ mock.call("2003-10-11T22:14:15.003Z", quiet=True)
+ ])
+
+ def test_get_timestamp_none(self):
+ self.assertIsNone(utils.get_timestamp("", "file1"))
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_rfc5424(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "rfc5424"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("2003-10-11T22:14:15.003Z mymachine.example.com su", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "rfc5424", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_syslog(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "syslog"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("Feb 12 18:30:08 15sp1-1 kernel:", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("Feb 12 18:30:08", "syslog", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_legacy(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "legacy"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("legacy 2003-10-11T22:14:15.003Z log data", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "legacy", "file1")
+
+ @mock.patch('crmsh.report.utils.diff_check')
+ def test_do_compare(self, mock_diff):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", node_list=["node1", "node2"])
+ mock_diff.side_effect = [[0, ""], [0, ""]]
+ rc, out = utils.do_compare(mock_ctx_inst, "file1")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_diff.assert_called_once_with("/opt/workdir/node1/file1", "/opt/workdir/node2/file1")
+
+ @mock.patch('os.path.isfile')
+ def test_diff_check_return(self, mock_isfile):
+ mock_isfile.return_value = False
+ rc, out = utils.diff_check("/opt/file1", "/opt/fil2")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "/opt/file1 does not exist\n")
+
+ @mock.patch('crmsh.report.utils.cib_diff')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isfile')
+ def test_diff_check(self, mock_isfile, mock_basename, mock_cib_diff):
+ mock_isfile.side_effect = [True, True]
+ mock_basename.return_value = "cib.xml"
+ mock_cib_diff.return_value = (0, "")
+ rc, out = utils.diff_check("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_txt_diff(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.txt_diff("txt1", "txt2")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('os.path.isfile')
+ def test_cib_diff_not_running(self, mock_isfile):
+ mock_isfile.side_effect = [True, False, False, True]
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "Can't compare cibs from running and stopped systems\n")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ @mock.patch('os.path.isfile')
+ def test_cib_diff(self, mock_isfile, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_isfile.side_effect = [True, True]
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_diff -c -n /opt/node1/cib.xml -o /opt/node2/cib.xml")
+
+ @mock.patch('os.symlink')
+ @mock.patch('shutil.move')
+ @mock.patch('os.remove')
+ @mock.patch('os.path.isfile')
+ def test_consolidate(self, mock_isfile, mock_remove, mock_move, mock_symlink):
+ mock_isfile.side_effect = [True, False]
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], work_dir="/opt/workdir")
+ utils.consolidate(mock_ctx_inst, "target_file")
+ mock_isfile.assert_has_calls([
+ mock.call("/opt/workdir/target_file"),
+ mock.call("/opt/workdir/target_file")
+ ])
+ mock_symlink.assert_has_calls([
+ mock.call('../target_file', '/opt/workdir/node1/target_file'),
+ mock.call('../target_file', '/opt/workdir/node2/target_file')
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer')
+ def test_do_sanitize(self, mock_sanitizer):
+ mock_inst = mock.Mock()
+ mock_sanitizer.return_value = mock_inst
+ mock_ctx_inst = mock.Mock()
+ utils.do_sanitize(mock_ctx_inst)
+ mock_inst.prepare.assert_called_once_with()
+ mock_inst.sanitize.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_empty(self, mock_read):
+ mock_read.return_value = ""
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_none(self, mock_read, mock_findln):
+ mock_read.return_value = "data"
+ mock_findln.return_value = None
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.filter_lines')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg(self, mock_read, mock_findln, mock_logger, mock_filter):
+ mock_read.return_value = "line1\nline2\nline3"
+ mock_filter.return_value = "line1\nline2\nline3"
+ res = utils.print_logseg("log1", 0, 0)
+ self.assertEqual(res, mock_filter.return_value)
+ mock_logger.debug2.assert_called_once_with("Including segment [%d-%d] from %s", 1, 3, "log1")
+
+ def test_head(self):
+ data = "line1\nline2\nline3"
+ res = utils.head(2, data)
+ self.assertEqual(res, ["line1", "line2"])
+
+ def test_tail(self):
+ data = "line1\nline2\nline3"
+ res = utils.tail(2, data)
+ self.assertEqual(res, ["line2", "line3"])
+
+ @mock.patch('crmsh.utils.get_open_method')
+ @mock.patch('builtins.open', create=True)
+ def test_write_to_file(self, mock_open, mock_method):
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with('Hello')
+
+ @mock.patch('gzip.open')
+ @mock.patch('crmsh.utils.get_open_method')
+ def test_write_to_file_encode(self, mock_method, mock_open):
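+ # With gzip.open as the open method, write_to_file is expected to encode
+ # the payload to bytes before writing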
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with(b'Hello')
+
+ @mock.patch('crmsh.report.utils.dt_to_str')
+ @mock.patch('crmsh.report.utils.ts_to_dt')
+ def test_ts_to_str(self, mock_ts_to_dt, mock_dt_to_str):
+ mock_ts_to_dt.return_value = datetime.datetime(2020, 2, 19, 21, 44, 7, 977355)
+ mock_dt_to_str.return_value = "2020-02-19 21:44"
+ res = utils.ts_to_str(1693519260.0)
+ self.assertEqual(res, mock_dt_to_str.return_value)
+
+ def test_ts_to_dt(self):
+ res = utils.ts_to_dt(crmutils.parse_to_timestamp("2023-09-01 06:01"))
+ self.assertEqual(utils.dt_to_str(res), "2023-09-01 06:01:00")
+
+ def test_now(self):
+ expected_res = datetime.datetime.now().strftime(constants.TIME_FORMAT)
+ res = utils.now()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.utils.now')
+ def test_create_description_template(self, mock_now, mock_isfile, mock_read, mock_str2file):
+ mock_now.return_value = "2023-09-01 06:01"
+ sys.argv = ["crm", "report", "option1"]
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/workdir")
+ mock_isfile.return_value = True
+ mock_read.return_value = "data"
+ utils.create_description_template(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.extract_critical_log')
+ @mock.patch('crmsh.report.utils.check_collected_files')
+ @mock.patch('crmsh.report.utils.compare_and_consolidate_files')
+ @mock.patch('glob.glob')
+ def test_analyze(self, mock_glob, mock_compare, mock_check_collected, mock_extract, mock_str2file):
+ mock_compare.return_value = "data"
+ mock_check_collected.return_value = ""
+ mock_extract.return_value = ""
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ utils.analyze(mock_ctx_inst)
+ mock_str2file.assert_called_once_with("data", f"/opt/work_dir/{constants.ANALYSIS_F}")
+
+ @mock.patch('crmsh.report.utils.consolidate')
+ @mock.patch('crmsh.report.utils.do_compare')
+ @mock.patch('glob.glob')
+ def test_compare_and_consolidate_files(self, mock_glob, mock_compare, mock_consolidate):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_glob.side_effect = [False, True, True, True, True]
+ mock_compare.side_effect = [(0, ""), (0, ""), (0, ""), (0, "")]
+ res = utils.compare_and_consolidate_files(mock_ctx_inst)
+ self.assertEqual(f"Diff {constants.MEMBERSHIP_F}... no {constants.MEMBERSHIP_F} found in /opt/work_dir\nDiff {constants.CRM_MON_F}... OK\nDiff {constants.COROSYNC_F}... OK\nDiff {constants.SYSINFO_F}... OK\nDiff {constants.CIB_F}... OK\n\n", res)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('crmsh.utils.file_is_empty')
+ @mock.patch('os.path.isfile')
+ def test_check_collected_files(self, mock_isfile, mock_is_empty, mock_read):
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work_dir")
+ mock_isfile.side_effect = [False, False, True]
+ mock_is_empty.return_value = False
+ mock_read.return_value = "data"
+ res = utils.check_collected_files(mock_ctx_inst)
+ self.assertEqual(res, ["Checking problems with permissions/ownership at node1:", "data"])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp_none(self, mock_parse, mock_error):
+ mock_parse.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ utils.parse_to_timestamp("xxxxx")
+ mock_error.assert_has_calls([
+ mock.call(f"Invalid time string 'xxxxx'"),
+ mock.call('Try these formats like: 2pm; "2019/9/5 12:30"; "09-Sep-07 2:00"; "[1-9][0-9]*[YmdHM]"')
+ ])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp(self, mock_parse, mock_error):
+ mock_parse.return_value = 1234567
+ res = utils.parse_to_timestamp("2023")
+ self.assertEqual(res, mock_parse.return_value)
+
+ def test_parse_to_timestamp_delta(self):
+ timedelta = datetime.timedelta(days=10)
+ expected_timestamp = (datetime.datetime.now() - timedelta).timestamp()
+ res = utils.parse_to_timestamp("10d")
+ self.assertEqual(int(res), int(expected_timestamp))
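+ # "10d" exercises the relative-offset grammar ([1-9][0-9]*[YmdHM]) hinted
+ # at in the error message above; the suffixes presumably map to
+ # years/months/days/hours/minutes counted back from now.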
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('glob.glob')
+ def test_extract_critical_log(self, mock_glob, mock_read):
+ mock_glob.return_value = ["/opt/workdir/pacemaker.log"]
+ mock_read.return_value = """
+line1
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource
+line4
+ """
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ res = utils.extract_critical_log(mock_ctx_inst)
+ expected_data = """
+WARNINGS or ERRORS in pacemaker.log:
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource"""
+ self.assertEqual('\n'.join(res), expected_data)
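+
+ # A hedged sketch of the scan exercised here, assuming
+ # extract_critical_log() greps each collected log for warning/error
+ # markers (the exact pattern is an assumption):
+ #
+ # critical_re = re.compile(r'(warning|error):')
+ # hits = [ln for ln in log_data.splitlines() if critical_re.search(ln)]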
+
+ def test_findln_by_timestamp_1(self):
+ pacemaker_file_path = "pacemaker.log.2"
+ with open(pacemaker_file_path) as f:
+ data = f.read()
+ data_list = data.split('\n')
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ first_timestamp = utils.get_timestamp(data_list[0], pacemaker_file_path)
+ middle_timestamp = utils.get_timestamp(data_list[1], pacemaker_file_path)
+ last_timestamp = utils.get_timestamp(data_list[2], pacemaker_file_path)
+ assert first_timestamp < middle_timestamp < last_timestamp
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 11:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 2
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 12:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 3
+
+ def test_findln_by_timestamp_irregular(self):
+ data = """line1
+ line2
+ line3
+ line4"""
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time)
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, "file1")
+ self.assertIsNone(result_line)
+
+ def test_findln_by_timestamp(self):
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ with open('pacemaker.log') as f:
+ data = f.read()
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ pacemaker_file_path = "pacemaker.log"
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_line_stamp = utils.get_timestamp(data.split('\n')[result_line-1], pacemaker_file_path)
+ assert result_line_stamp > target_time_stamp
+ result_pre_line_stamp = utils.get_timestamp(data.split('\n')[result_line-2], pacemaker_file_path)
+ assert result_pre_line_stamp < target_time_stamp
+
+ target_time = "Apr 03 11:01:19"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_time = ' '.join(data.split('\n')[result_line-1].split()[:3])
+ self.assertEqual(result_time, target_time)
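+
+ # Together these assertions pin down the contract assumed for
+ # findln_by_timestamp(): it returns a 1-based line number whose timestamp
+ # is the first one at or after the target, i.e. a bisect-style lower
+ # bound over the time-sorted log lines.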
+
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_not_syslog(self, mock_parse):
+ mock_parse.return_value = 123456
+ res = utils.get_timestamp_from_time_line("line1", "rfc5424", "file1")
+ self.assertEqual(res, mock_parse.return_value)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_next_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2024, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertIsNone(res)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_that_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2022, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertEqual(res, mock_parse.return_value)
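+
+ # The next_year/that_year pair encodes the assumed syslog-year heuristic:
+ # syslog stamps carry no year, so the parser tries the current year and
+ # returns None when the reconstructed time would postdate the file's
+ # mtime (it could only belong to a future year), while a stamp resolving
+ # to a past year is accepted as-is.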
diff --git a/test/unittests/test_sbd.py b/test/unittests/test_sbd.py
new file mode 100644
index 0000000..bc2b50a
--- /dev/null
+++ b/test/unittests/test_sbd.py
@@ -0,0 +1,894 @@
+import os
+import unittest
+import logging
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+
+
+class TestSBDTimeout(unittest.TestCase):
+ """
+ Unit tests for crmsh.sbd.SBDTimeout
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ _dict = {"sbd.watchdog_timeout": 5, "sbd.msgwait": 10}
+ _inst_q = mock.Mock()
+ self.sbd_timeout_inst = sbd.SBDTimeout(mock.Mock(profiles_dict=_dict, is_s390=True, qdevice_inst=_inst_q))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_initialize_timeout(self):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout = mock.Mock()
+ self.sbd_timeout_inst._set_sbd_msgwait = mock.Mock()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice = mock.Mock()
+ self.sbd_timeout_inst.initialize_timeout()
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout.assert_called_once()
+ self.sbd_timeout_inst._set_sbd_msgwait.assert_not_called()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice.assert_called_once()
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_watchdog_timeout(self, mock_warn):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to %d for s390, it was %d", sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390, 5)
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_msgwait(self, mock_warn):
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ self.sbd_timeout_inst._set_sbd_msgwait()
+ mock_warn.assert_called_once_with("sbd msgwait is set to %d, it was %d", 30, 10)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_qdevice_sync_timeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_sbd_stage(self, mock_is_configured, mock_is_active, mock_get_sync, mock_warn):
+ mock_is_configured.return_value = True
+ mock_is_active.return_value = True
+ mock_get_sync.return_value = 15
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 20 for qdevice, it was 5")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_all(self, mock_is_configured, mock_warn):
+ mock_is_configured.return_value = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 35 for qdevice, it was 5")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait_exception(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ self.assertEqual("Cannot get sbd msgwait for /dev/sda1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait(self, mock_run):
+ mock_run.return_value = """
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ res = sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ assert res == 10
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout_exception(self, mock_get):
+ mock_get.return_value = None
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ self.assertEqual("Cannot get the value of SBD_WATCHDOG_TIMEOUT", str(err.exception))
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout(self, mock_get):
+ mock_get.return_value = 5
+ res = sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ assert res == 5
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout_return(self, mock_active):
+ mock_active.return_value = False
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == sbd.SBDTimeout.STONITH_WATCHDOG_TIMEOUT_DEFAULT
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout(self, mock_active, mock_get_property):
+ mock_active.return_value = True
+ mock_get_property.return_value = "60s"
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == 60
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.utils.get_pcmk_delay_max')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_msgwait')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations(self, mock_2node, mock_get_sbd_dev, mock_get_msgwait, mock_pcmk_delay, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = ["/dev/sda1"]
+ mock_get_msgwait.return_value = 30
+ mock_pcmk_delay.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_msgwait.assert_called_once_with("/dev/sda1")
+ mock_pcmk_delay.assert_called_once_with(True)
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations_diskless(self, mock_2node, mock_get_sbd_dev, mock_get_watchdog_timeout, mock_get_stonith_watchdog_timeout, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = []
+ mock_get_watchdog_timeout.return_value = 30
+ mock_get_stonith_watchdog_timeout.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_watchdog_timeout.assert_called_once_with()
+ mock_get_stonith_watchdog_timeout.assert_called_once_with()
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
+ mock_general.return_value = 11
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 83
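+ # 83 is consistent with int((pcmk_delay_max + msgwait) * 1.2) + token and
+ # consensus timeout = int(60 * 1.2) + 11; the 1.2 safety factor is an
+ # inference from the numbers, not a documented constant.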
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected_diskless(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.stonith_watchdog_timeout = -1
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ mock_general.return_value = 11
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 71
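+ # With stonith_watchdog_timeout=-1 the effective value presumably falls
+ # back to 2 * sbd_watchdog_timeout (30), and 71 = 2 * 30 + 11 (token and
+ # consensus timeout).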
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
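+ # Disk-based: 90 = pcmk_delay_max (30) + msgwait (30) + token and
+ # consensus timeout (30).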
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected_diskless(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 30
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
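+ # Diskless: 90 = 2 * sbd_watchdog_timeout (30) + token and consensus
+ # timeout (30).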
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_is_sbd_delay_start(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "100"
+ assert sbd.SBDTimeout.is_sbd_delay_start() is True
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start_return(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "100"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "no"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_called_once_with({"SBD_DELAY_START": "100"})
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_no(self, mock_get_sbd_value, mock_run):
+ mock_get_sbd_value.return_value = "no"
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_not_called()
+
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_return(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp):
+ mock_get_sbd_value.return_value = "10"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_not_called()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp, mock_str2file, mock_csync2, mock_cluster_run):
+ mock_get_sbd_value.return_value = "100"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_str2file.assert_called_once_with('[Service]\nTimeoutSec=120', '/etc/systemd/system/sbd.service.d/sbd_delay_start.conf')
+ mock_csync2.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_cluster_run.assert_called_once_with("systemctl daemon-reload")
+
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig_yes(self, mock_get_sbd_value, mock_get_sbd_timeout):
+ mock_get_sbd_value.return_value = "yes"
+ mock_get_sbd_timeout.return_value = 30
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 60
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "30"
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 30
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+
+class TestSBDManager(unittest.TestCase):
+ """
+ Unit tests for crmsh.sbd.SBDManager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.sbd_inst = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1"], diskless_sbd=False))
+ self.sbd_inst_devices_gt_3 = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]))
+ self.sbd_inst_interactive = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=False))
+ self.sbd_inst_diskless = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=True))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_yes_to_all(self, mock_warn):
+ self.sbd_inst._context = mock.Mock(yes_to_all=True)
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_warn.assert_called_once_with(sbd.SBDManager.SBD_WARNING)
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_not_confirm(self, mock_warn, mock_status, mock_confirm):
+ self.sbd_inst._context.yes_to_all = False
+ mock_confirm.return_value = False
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_warn.assert_called_once_with("Not configuring SBD - STONITH will be disabled.")
+
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_already_configured(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_from_config.return_value = ["/dev/sda1"]
+ mock_no_overwrite.return_value = True
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sda1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ ])
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_called_once_with('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_null_and_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_called_once_with("Do you wish to use SBD?")
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+ ])
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt, mock_verify, mock_error_msg, mock_warn, mock_split):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.side_effect = [True, False, True]
+ mock_from_config.return_value = []
+ mock_no_overwrite.return_value = False
+ mock_prompt.side_effect = ["/dev/test1", "/dev/sda1", "/dev/sdb1"]
+ mock_split.side_effect = [["/dev/test1"], ["/dev/sda1"], ["/dev/sdb1"]]
+ mock_verify.side_effect = [ValueError("/dev/test1 error"), None, None]
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sdb1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ mock.call("Are you sure you wish to use this device?")
+ ])
+ mock_from_config.assert_called_once_with()
+ mock_error_msg.assert_called_once_with("/dev/test1 error")
+ mock_warn.assert_has_calls([
+ mock.call("All data on /dev/sda1 will be destroyed!"),
+ mock.call("All data on /dev/sdb1 will be destroyed!")
+ ])
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*') for x in range(3)
+ ])
+ mock_split.assert_has_calls([
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/test1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sda1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sdb1"),
+ ])
+
+ def test_verify_sbd_device_gt_3(self):
+ assert self.sbd_inst_devices_gt_3.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]
+ dev_list = self.sbd_inst_devices_gt_3.sbd_devices_input
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst_devices_gt_3._verify_sbd_device(dev_list)
+ self.assertEqual("Maximum number of SBD device is 3", str(err.exception))
+
+ @mock.patch('crmsh.sbd.SBDManager._compare_device_uuid')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_sbd_device_not_block(self, mock_block_device, mock_compare):
+ assert self.sbd_inst.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1"]
+ dev_list = self.sbd_inst.sbd_devices_input
+ mock_block_device.side_effect = [True, False]
+
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._verify_sbd_device(dev_list)
+ self.assertEqual("/dev/sdc1 doesn't look like a block device", str(err.exception))
+
+ mock_block_device.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdc1")])
+ mock_compare.assert_called_once_with("/dev/sdb1", [])
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ def test_get_sbd_device_from_option(self, mock_verify):
+ self.sbd_inst._get_sbd_device()
+ mock_verify.assert_called_once_with(['/dev/sdb1', '/dev/sdc1'])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_interactive')
+ def test_get_sbd_device_from_interactive(self, mock_interactive):
+ mock_interactive.return_value = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst_interactive._get_sbd_device()
+ mock_interactive.assert_called_once_with()
+
+ def test_get_sbd_device_diskless(self):
+ self.sbd_inst_diskless._get_sbd_device()
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd_return(self, mock_info, mock_sbd_timeout):
+ mock_inst = mock.Mock()
+ mock_sbd_timeout.return_value = mock_inst
+ self.sbd_inst_diskless._context = mock.Mock(profiles_dict={})
+ self.sbd_inst_diskless._initialize_sbd()
+ mock_info.assert_called_once_with("Configuring diskless SBD")
+ mock_inst.initialize_timeout.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd(self, mock_info, mock_sbd_timeout, mock_invoke, mock_error):
+ mock_inst = mock.Mock(sbd_msgwait=10, sbd_watchdog_timeout=5)
+ mock_sbd_timeout.return_value = mock_inst
+ mock_inst.set_sbd_watchdog_timeout = mock.Mock()
+ mock_inst.set_sbd_msgwait = mock.Mock()
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ mock_invoke.side_effect = [(True, None, None), (False, None, "error")]
+ mock_error.side_effect = ValueError
+
+ with self.assertRaises(ValueError):
+ self.sbd_inst._initialize_sbd()
+
+ mock_invoke.assert_has_calls([
+ mock.call("sbd -4 10 -1 5 -d /dev/sdb1 create"),
+ mock.call("sbd -4 10 -1 5 -d /dev/sdc1 create")
+ ])
+ mock_error.assert_called_once_with("Failed to initialize SBD device /dev/sdc1: error")
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('shutil.copyfile')
+ def test_update_configuration(self, mock_copy, mock_sysconfig, mock_update):
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst._watchdog_inst = mock.Mock(watchdog_device_name="/dev/watchdog")
+ self.sbd_inst.timeout_inst = mock.Mock(sbd_watchdog_timeout=15)
+
+ self.sbd_inst._update_sbd_configuration()
+
+ mock_copy.assert_called_once_with("/usr/share/fillup-templates/sysconfig.sbd", "/etc/sysconfig/sbd")
+ mock_sysconfig.assert_called_once_with("/etc/sysconfig/sbd", SBD_WATCHDOG_DEV='/dev/watchdog', SBD_DEVICE='/dev/sdb1;/dev/sdc1', SBD_WATCHDOG_TIMEOUT="15")
+ mock_update.assert_called_once_with("/etc/sysconfig/sbd")
+
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config_none(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = None
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == []
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config(self, mock_parse, mock_split):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/sdb1;/dev/sdc1"
+ mock_split.return_value = ["/dev/sdb1", "/dev/sdc1"]
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == ["/dev/sdb1", "/dev/sdc1"]
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+ mock_split.assert_called_once_with(sbd.SBDManager.PARSE_RE, "/dev/sdb1;/dev/sdc1")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_diskless(self, mock_vote, mock_warn):
+ self.sbd_inst_diskless._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst_diskless._warn_diskless_sbd()
+ mock_vote.assert_not_called()
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_peer(self, mock_vote, mock_warn):
+ mock_vote.return_value = {'Expected': '1'}
+ self.sbd_inst_diskless._warn_diskless_sbd("node2")
+ mock_vote.assert_called_once_with("node2")
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.sbd_init()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_return(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_invoke):
+ mock_package.return_value = True
+ self.sbd_inst._sbd_devices = None
+ self.sbd_inst.diskless_sbd = False
+ self.sbd_inst._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+
+ self.sbd_inst.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_not_called()
+ mock_update.assert_not_called()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._enable_sbd_service')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_warn, mock_enable_sbd):
+ mock_package.return_value = True
+ self.sbd_inst_diskless._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+ self.sbd_inst_diskless.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_called_once_with()
+ mock_update.assert_called_once_with()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_warn.assert_called_once_with()
+ mock_enable_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_no_ra_running(self, mock_parser, mock_status, mock_cluster_run, mock_wait, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = False
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_status.assert_called_once_with("Restarting cluster service")
+ mock_cluster_run.assert_called_once_with("crm cluster restart")
+ mock_wait.assert_called_once_with()
+ mock_config_sbd_ra.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_diskless(self, mock_parser, mock_warn, mock_get_timeout):
+ mock_parser().is_any_resource_running.return_value = True
+ mock_get_timeout.return_value = 60
+ self.sbd_inst_diskless.timeout_inst = mock.Mock(stonith_watchdog_timeout=-1)
+ self.sbd_inst_diskless._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ mock.call("Then run \"crm configure property stonith-enabled=true stonith-watchdog-timeout=-1 stonith-timeout=60\" on any node")
+ ])
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed(self, mock_parser, mock_warn, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = True
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_enable_sbd_service_init(self, mock_invoke):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst._enable_sbd_service()
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._restart_cluster_and_configure_sbd_ra')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ def test_enable_sbd_service_restart(self, mock_cluster_run, mock_restart):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._enable_sbd_service()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable sbd.service"),
+ ])
+ mock_restart.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.configure_sbd_resource_and_properties()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sbd.SBDTimeout.adjust_sbd_timeout_related_cluster_configuration')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties(
+ self,
+ mock_package, mock_enabled, mock_parser, mock_run, mock_set_property, sbd_adjust, mock_is_active,
+ ):
+ mock_package.return_value = True
+ mock_enabled.return_value = True
+ mock_parser().is_resource_configured.return_value = False
+ mock_is_active.return_value = False
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._get_sbd_device_from_config = mock.Mock()
+ self.sbd_inst._get_sbd_device_from_config.return_value = ["/dev/sda1"]
+
+ self.sbd_inst.configure_sbd_resource_and_properties()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_run.assert_called_once_with("crm configure primitive {} {}".format(sbd.SBDManager.SBD_RA_ID, sbd.SBDManager.SBD_RA))
+ mock_set_property.assert_called_once_with("stonith-enabled", "true")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_exist(self, mock_package, mock_exists, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_disabled(self, mock_package, mock_exists, mock_enabled, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = False
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_verify, mock_status):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb1"]
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sdb1"], ["node1"])
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_warn, mock_status, mock_set):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = []
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_warn.assert_called_once_with("node1")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got diskless SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod_exception(self, mock_get_config):
+ mock_get_config.return_value = []
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDManager.verify_sbd_device()
+ self.assertEqual("No sbd device configured", str(err.exception))
+ mock_get_config.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod(self, mock_get_config, mock_list_nodes, mock_verify):
+ mock_get_config.return_value = ["/dev/sda1"]
+ mock_list_nodes.return_value = ["node1"]
+ sbd.SBDManager.verify_sbd_device()
+ mock_get_config.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sda1"], ["node1"])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid_return(self, mock_get_uuid):
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", None)
+ mock_get_uuid.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid(self, mock_get_uuid):
+ mock_get_uuid.side_effect = ["1234", "5678"]
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", ["node1"])
+ self.assertEqual("Device /dev/sdb1 doesn't have the same UUID with node1", str(err.exception))
+ mock_get_uuid.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdb1", "node1")])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid_not_match(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._get_device_uuid("/dev/sdb1")
+ self.assertEqual("Cannot find sbd device UUID for /dev/sdb1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sdb1 dump", None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid(self, mock_run):
+ output = """
+ ==Dumping header on disk /dev/sda1
+ Header version : 2.1
+ UUID : a2e9a92c-cc72-4ef9-ac55-ccc342f3546b
+ Number of slots : 255
+ Sector size : 512
+ Timeout (watchdog) : 5
+ Timeout (allocate) : 2
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ mock_run.return_value = output
+ res = self.sbd_inst._get_device_uuid("/dev/sda1", node="node1")
+ self.assertEqual(res, "a2e9a92c-cc72-4ef9-ac55-ccc342f3546b")
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump", "node1")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_true(self, mock_context, mock_is_active, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = []
+ mock_is_active.return_value = True
+ assert sbd.SBDManager.is_using_diskless_sbd() is True
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+ mock_is_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_false(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.is_using_diskless_sbd() is False
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_get_sbd_device_from_config_classmethod(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.get_sbd_device_from_config() == ["/dev/sda1"]
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ def test_update_configuration_static(self, mock_config_set, mock_csync2):
+ sbd_config_dict = {
+ "SBD_PACEMAKER": "yes",
+ "SBD_STARTMODE": "always",
+ "SBD_DELAY_START": "no",
+ }
+ self.sbd_inst.update_configuration(sbd_config_dict)
+ mock_config_set.assert_called_once_with(bootstrap.SYSCONFIG_SBD, **sbd_config_dict)
+ mock_csync2.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
diff --git a/test/unittests/test_scripts.py b/test/unittests/test_scripts.py
new file mode 100644
index 0000000..04c74e2
--- /dev/null
+++ b/test/unittests/test_scripts.py
@@ -0,0 +1,914 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from builtins import str
+from builtins import object
+from os import path
+from pprint import pprint
+import pytest
+from lxml import etree
+from crmsh import scripts
+from crmsh import ra
+from crmsh import utils
+
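+# Point script discovery at the fixture directory bundled with the tests
+# instead of the system-wide crmsh script directories.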
+scripts._script_dirs = lambda: [path.join(path.dirname(__file__), 'scripts')]
+
+_apache = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="apache">
+<version>1.0</version>
+
+<longdesc lang="en">
+This is the resource agent for the Apache Web server.
+This resource agent operates both version 1.x and version 2.x Apache
+servers.
+
+The start operation ends with a loop in which monitor is
+repeatedly called to make sure that the server started and that
+it is operational. Hence, if the monitor operation does not
+succeed within the start operation timeout, the apache resource
+will end with an error status.
+
+The monitor operation by default loads the server status page
+which depends on the mod_status module and the corresponding
+configuration file (usually /etc/apache2/mod_status.conf).
+Make sure that the server status page works and that the access
+is allowed *only* from localhost (address 127.0.0.1).
+See the statusurl and testregex attributes for more details.
+
+See also http://httpd.apache.org/
+</longdesc>
+<shortdesc lang="en">Manages an Apache Web server instance</shortdesc>
+
+<parameters>
+<parameter name="configfile" required="0" unique="1">
+<longdesc lang="en">
+The full pathname of the Apache configuration file.
+This file is parsed to provide defaults for various other
+resource agent parameters.
+</longdesc>
+<shortdesc lang="en">configuration file path</shortdesc>
+<content type="string" default="$(detect_default_config)" />
+</parameter>
+
+<parameter name="httpd">
+<longdesc lang="en">
+The full pathname of the httpd binary (optional).
+</longdesc>
+<shortdesc lang="en">httpd binary path</shortdesc>
+<content type="string" default="/usr/sbin/httpd" />
+</parameter>
+
+<parameter name="port" >
+<longdesc lang="en">
+A port number that we can probe for status information
+using the statusurl.
+This will default to the port number found in the
+configuration file, or 80, if none can be found
+in the configuration file.
+
+</longdesc>
+<shortdesc lang="en">httpd port</shortdesc>
+<content type="integer" />
+</parameter>
+
+<parameter name="statusurl">
+<longdesc lang="en">
+The URL to monitor (the apache server status page by default).
+If left unspecified, it will be inferred from
+the apache configuration file.
+
+If you set this, make sure that it succeeds *only* from the
+localhost (127.0.0.1). Otherwise, it may happen that the cluster
+complains about the resource being active on multiple nodes.
+</longdesc>
+<shortdesc lang="en">url name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex">
+<longdesc lang="en">
+Regular expression to match in the output of statusurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">monitor regular expression</shortdesc>
+<content type="string" default="exists, but impossible to show in a human readable format (try grep testregex)"/>
+</parameter>
+
+<parameter name="client">
+<longdesc lang="en">
+Client to use to query Apache. If not specified, the RA will
+try to find one on the system. Currently, wget and curl are
+supported. For example, you can set this parameter to "curl" if
+you prefer that to wget.
+</longdesc>
+<shortdesc lang="en">http client</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="testurl">
+<longdesc lang="en">
+URL to test. If it does not start with "http", then it's
+considered to be relative to the Listen address.
+</longdesc>
+<shortdesc lang="en">test url</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex10">
+<longdesc lang="en">
+Regular expression to match in the output of testurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">extended monitor regular expression</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testconffile">
+<longdesc lang="en">
+A file which contains test configuration. Could be useful if
+you have to check more than one web application or in case sensitive
+info should be passed as arguments (passwords). Furthermore,
+using a config file is the only way to specify certain
+parameters.
+
+Please see README.webapps for examples and file description.
+</longdesc>
+<shortdesc lang="en">test configuration file</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testname">
+<longdesc lang="en">
+Name of the test within the test configuration file.
+</longdesc>
+<shortdesc lang="en">test name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="options">
+<longdesc lang="en">
+Extra options to apply when starting apache. See man httpd(8).
+</longdesc>
+<shortdesc lang="en">command line options</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="envfiles">
+<longdesc lang="en">
+Files (one or more) which contain extra environment variables.
+If you want to prevent script from reading the default file, set
+this parameter to empty string.
+</longdesc>
+<shortdesc lang="en">environment settings files</shortdesc>
+<content type="string" default="/etc/apache2/envvars"/>
+</parameter>
+
+<parameter name="use_ipv6">
+<longdesc lang="en">
+We will try to detect if the URL (for monitor) is IPv6, but if
+that doesn't work, set this to true to enforce IPv6.
+</longdesc>
+<shortdesc lang="en">use ipv6 with http clients</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="60s" />
+<action name="status" timeout="30s" />
+<action name="monitor" depth="0" timeout="20s" interval="10" />
+<action name="meta-data" timeout="5" />
+<action name="validate-all" timeout="5" />
+</actions>
+</resource-agent>
+'''
+
+_virtual_ip = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="IPaddr2">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource manages IP alias IP addresses.
+It can add an IP alias, or remove one.
+In addition, it can implement Cluster Alias IP functionality
+if invoked as a clone resource.
+
+If used as a clone, you should explicitly set clone-node-max &gt;= 2,
+and/or clone-max &lt; number of nodes. In case of node failure,
+clone instances need to be re-allocated on surviving nodes.
+This would not be possible if there is already an instance on those nodes,
+and clone-node-max=1 (which is the default).
+</longdesc>
+
+<shortdesc lang="en">Manages virtual IPv4 and IPv6 addresses (Linux specific version)</shortdesc>
+
+<parameters>
+<parameter name="ip" unique="1" required="1">
+<longdesc lang="en">
+The IPv4 (dotted quad notation) or IPv6 address (colon hexadecimal notation)
+example IPv4 "192.168.1.1".
+example IPv6 "2001:db8:DC28:0:0:FC57:D4C8:1FFF".
+</longdesc>
+<shortdesc lang="en">IPv4 or IPv6 address</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="nic" unique="0">
+<longdesc lang="en">
+The base network interface on which the IP address will be brought
+online.
+If left empty, the script will try and determine this from the
+routing table.
+
+Do NOT specify an alias interface in the form eth0:1 or anything here;
+rather, specify the base interface only.
+If you want a label, see the iflabel parameter.
+
+Prerequisite:
+
+There must be at least one static IP address, which is not managed by
+the cluster, assigned to the network interface.
+If you can not assign any static IP address on the interface,
+modify this kernel parameter:
+
+sysctl -w net.ipv4.conf.all.promote_secondaries=1 # (or per device)
+</longdesc>
+<shortdesc lang="en">Network interface</shortdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="cidr_netmask">
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0)
+
+If unspecified, the script will also try to determine this from the
+routing table.
+</longdesc>
+<shortdesc lang="en">CIDR netmask</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="broadcast">
+<longdesc lang="en">
+Broadcast address associated with the IP. If left empty, the script will
+determine this from the netmask.
+</longdesc>
+<shortdesc lang="en">Broadcast address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="iflabel">
+<longdesc lang="en">
+You can specify an additional label for your IP address here.
+This label is appended to your interface name.
+
+The kernel allows alphanumeric labels up to a maximum length of 15
+characters including the interface name and colon (e.g. eth0:foobar1234)
+
+A label can be specified in nic parameter but it is deprecated.
+If a label is specified in nic name, this parameter has no effect.
+</longdesc>
+<shortdesc lang="en">Interface label</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="lvs_support">
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. In case an IP
+address is stopped, only move it to the loopback device to allow the
+local node to continue to service requests, but no longer advertise it
+on the network.
+
+Notes for IPv6:
+It is not necessary to enable this option on IPv6.
+Instead, enable 'lvs_ipv6_addrlabel' option for LVS-DR usage on IPv6.
+</longdesc>
+<shortdesc lang="en">Enable support for LVS DR</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_support_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel">
+<longdesc lang="en">
+Enable adding IPv6 address label so IPv6 traffic originating from
+the address's interface does not use this address as the source.
+This is necessary for LVS-DR health checks to realservers to work. Without it,
+the most recently added IPv6 address (probably the address added by IPaddr2)
+will be used as the source address for IPv6 traffic from that interface and
+since that address exists on loopback on the realservers, the realserver
+response to pings/connections will never leave its loopback.
+See RFC3484 for the detail of the source address selection.
+
+See also 'lvs_ipv6_addrlabel_value' parameter.
+</longdesc>
+<shortdesc lang="en">Enable adding IPv6 address label.</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_ipv6_addrlabel_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel_value">
+<longdesc lang="en">
+Specify IPv6 address label value used when 'lvs_ipv6_addrlabel' is enabled.
+The value should be an unused label in the policy table
+which is shown by 'ip addrlabel list' command.
+You would rarely need to change this parameter.
+</longdesc>
+<shortdesc lang="en">IPv6 address label value.</shortdesc>
+<content type="integer" default="${OCF_RESKEY_lvs_ipv6_addrlabel_value_default}"/>
+</parameter>
+
+<parameter name="mac">
+<longdesc lang="en">
+Set the interface MAC address explicitly. Currently this is only used for
+the Cluster IP alias. Leave empty to choose automatically.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP MAC address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="clusterip_hash">
+<longdesc lang="en">
+Specify the hashing algorithm used for the Cluster IP functionality.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP hashing function</shortdesc>
+<content type="string" default="${OCF_RESKEY_clusterip_hash_default}"/>
+</parameter>
+
+<parameter name="unique_clone_address">
+<longdesc lang="en">
+If true, add the clone ID to the supplied value of IP to create
+a unique address to manage
+</longdesc>
+<shortdesc lang="en">Create a unique address for cloned instances</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_unique_clone_address_default}"/>
+</parameter>
+
+<parameter name="arp_interval">
+<longdesc lang="en">
+Specify the interval between unsolicited ARP packets in milliseconds.
+</longdesc>
+<shortdesc lang="en">ARP packet interval in ms</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
+</parameter>
+
+<parameter name="arp_count">
+<longdesc lang="en">
+Number of unsolicited ARP packets to send.
+</longdesc>
+<shortdesc lang="en">ARP packet count</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_count_default}"/>
+</parameter>
+
+<parameter name="arp_bg">
+<longdesc lang="en">
+Whether or not to send the ARP packets in the background.
+</longdesc>
+<shortdesc lang="en">ARP from background</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
+</parameter>
+
+<parameter name="arp_mac">
+<longdesc lang="en">
+MAC address to send the ARP packets to.
+
+You really shouldn't be touching this.
+
+</longdesc>
+<shortdesc lang="en">ARP MAC</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_mac_default}"/>
+</parameter>
+
+<parameter name="arp_sender">
+<longdesc lang="en">
+The program used to send ARP packets on start. For InfiniBand
+interfaces, the default is ipoibarping; if ipoibarping is not
+available, set this to send_arp.
+</longdesc>
+<shortdesc lang="en">ARP sender</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="flush_routes">
+<longdesc lang="en">
+Flush the routing table on stop. This is for
+applications which use the cluster IP address
+and which run on the same physical host that the
+IP address lives on. The Linux kernel may force that
+application to take a shortcut to the local loopback
+interface, instead of the interface the address
+is really bound to. Under those circumstances, an
+application may, somewhat unexpectedly, continue
+to use connections for some time even after the
+IP address is deconfigured. Set this parameter in
+order to immediately disable said shortcut when the
+IP address goes away.
+</longdesc>
+<shortdesc lang="en">Flush kernel routing table on stop</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+<actions>
+<action name="start" timeout="20s" />
+<action name="stop" timeout="20s" />
+<action name="status" depth="0" timeout="20s" interval="10s" />
+<action name="monitor" depth="0" timeout="20s" interval="10s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="20s" />
+</actions>
+</resource-agent>
+'''
+
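+# Save the real implementations at import time so setup_function below can
+# hijack ra.get_ra and utils.list_cluster_nodes per test, and
+# teardown_function can restore them afterwards.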
+_saved_get_ra = ra.get_ra
+_saved_cluster_nodes = utils.list_cluster_nodes
+
+
+def setup_function():
+ "hijack ra.get_ra to add new resource class (of sorts)"
+ class Agent(object):
+ def __init__(self, name):
+ self.name = name
+
+ def meta(self):
+ if self.name == 'apache':
+ return etree.fromstring(_apache)
+ else:
+ return etree.fromstring(_virtual_ip)
+
+ def _get_ra(agent):
+ if agent.startswith('test:'):
+ return Agent(agent[5:])
+ return _saved_get_ra(agent)
+ ra.get_ra = _get_ra
+
+ utils.list_cluster_nodes = lambda: [utils.this_node(), 'a', 'b', 'c']
+
+
+def teardown_function():
+ ra.get_ra = _saved_get_ra
+ utils.list_cluster_nodes = _saved_cluster_nodes
+
+
+def test_list():
+ assert set(['v2', 'legacy', '10-webserver', 'inc1', 'inc2', 'vip', 'vipinc', 'unified']) == set(s for s in scripts.list_scripts())
+
+
+def test_load_legacy():
+ script = scripts.load_script('legacy')
+ assert script is not None
+ assert 'legacy' == script['name']
+ assert len(script['shortdesc']) > 0
+ pprint(script)
+ actions = scripts.verify(script, {}, external_check=False)
+ pprint(actions)
+ assert [{'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Configure SSH',
+ 'text': '',
+ 'value': 'configure.py ssh'},
+ {'longdesc': '',
+ 'name': 'collect',
+ 'shortdesc': 'Check state of nodes',
+ 'text': '',
+ 'value': 'collect.py'},
+ {'longdesc': '',
+ 'name': 'validate',
+ 'shortdesc': 'Verify parameters',
+ 'text': '',
+ 'value': 'verify.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Install packages',
+ 'text': '',
+ 'value': 'configure.py install'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Generate corosync authkey',
+ 'text': '',
+ 'value': 'authkey.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Configure cluster nodes',
+ 'text': '',
+ 'value': 'configure.py corosync'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Initialize cluster',
+ 'text': '',
+ 'value': 'init.py'}] == actions
+
+
+def test_load_workflow():
+ script = scripts.load_script('10-webserver')
+ assert script is not None
+ assert '10-webserver' == script['name']
+ assert len(script['shortdesc']) > 0
+
+
+def test_v2():
+ script = scripts.load_script('v2')
+ assert script is not None
+ assert 'v2' == script['name']
+ assert len(script['shortdesc']) > 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': False}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert str(actions[0]['text']).find('group www') >= 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': True}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 3
+
+
+def test_agent_include():
+ inc2 = scripts.load_script('inc2')
+ actions = scripts.verify(
+ inc2,
+ {'wiz': 'abc',
+ 'foo': 'cde',
+ 'included-script': {'foo': True, 'bar': 'bah bah'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 6
+ assert '33\n\nabc' == actions[-1]['text'].strip()
+
+
+def test_vipinc():
+ script = scripts.load_script('vipinc')
+ assert script is not None
+ actions = scripts.verify(
+ script,
+ {'vip': {'id': 'vop', 'ip': '10.0.0.4'}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'].find('primitive vop test:virtual-ip\n\tip="10.0.0.4"') >= 0
+ assert actions[0]['text'].find("clone c-vop vop") >= 0
+
+
+def test_value_replace_handles():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ value: bar
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: test-a
+ parameters:
+ - name: foo
+ value: "{{wiz}}+{{wiz}}"
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "{{test-a:foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "SARUMAN+SARUMAN"
+
+
+def test_optional_step_ref():
+ """
+ Regression test for referencing ids from optional substeps.
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ required: false
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "primitive {{wiz}} {{apache:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_a,
+ {"id": "apacho"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive apacho test:apache"
+
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN", "apache": {"id": "apacho"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive SARUMAN apacho"
+
+
+def test_enums_basic():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "one"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "one"
+
+ actions = scripts.verify(script_a,
+ {"foo": "three"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "three"
+
+
+def test_enums_fail():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "wrong"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_enums_fail2():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "one"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_two_substeps():
+ """
+ Regression test for a scoping bug: including the same script twice
+ under different names must keep their parameters in separate scopes.
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ name: apache-a
+ required: true
+ - script: apache
+ name: apache-b
+ required: true
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - include: apache-a
+ - include: apache-b
+ - cib: "primitive {{wiz}} {{apache-a:id}} {{apache-b:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_b,
+ {'wiz': "head", "apache-a": {"id": "one"}, "apache-b": {"id": "two"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive one test:apache\n\nprimitive two test:apache\n\nprimitive head one two"
+
+
+def test_required_subscript_params():
+ """
+ If an optional sub-script has multiple required parameters,
+ excluding all of them is OK;
+ excluding only one of them must fail.
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ - name: bar
+ required: true
+ type: string
+ actions:
+ - cib: "{{foo}} {{bar}}"
+'''
+
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: foofoo
+ required: false
+ actions:
+ - include: foofoo
+ - cib: "{{foofoo:foo}} {{foofoo:bar}}"
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ def ver():
+ actions = scripts.verify(script_b,
+ {"foofoo": {"foo": "one"}}, external_check=False)
+ pprint(actions)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_unified():
+ unified = scripts.load_script('unified')
+ actions = scripts.verify(
+ unified,
+ {'id': 'foo',
+ 'vip': {'id': 'bar', 'ip': '192.168.0.15'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert 'primitive bar IPaddr2 ip=192.168.0.15\ngroup g-foo foo bar' == actions[-1]['text'].strip()
+
+
+class TestPrinter(object):
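+ """Fake printer that records scripts.run() callbacks as (name, args) tuples."""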
+ def __init__(self):
+ import types
+ self.actions = []
+
+ def add_capture(name):
+ def capture(obj, *args):
+ obj.actions.append((name, args))
+ self.__dict__[name] = types.MethodType(capture, self)
+ for name in ('print_header', 'debug', 'error', 'start', 'flush', 'print_command', 'finish'):
+ add_capture(name)
+
+def test_inline_script():
+ """
+ Test inline script feature for call actions
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ actions:
+ - call: |
+ #!/bin/sh
+ echo "{{foo}}"
+ nodes: local
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "hello world"}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert actions[0]['name'] == 'call'
+ assert actions[0]['value'] == '#!/bin/sh\necho "hello world"'
+ tp = TestPrinter()
+ scripts.run(script_a,
+ {"foo": "hello world"}, tp)
+
+ for action, args in tp.actions:
+ print(action, args)
+ if action == 'finish':
+ assert args[0]['value'] == '#!/bin/sh\necho "hello world"'
+
+
+def test_when_expression():
+ """
+ Test when expressions
+ """
+ def runtest(when, val):
+ the_script = '''version: 2.2
+shortdesc: Test when expressions
+longdesc: See if more complicated expressions work
+parameters:
+ - name: stringtest
+ type: string
+ shortdesc: A test string
+actions:
+ - call: "echo '{{stringtest}}'"
+ when: %s
+'''
+ scrpt = scripts.load_script_string('{}_{}'.format(when, val), the_script % when)
+ assert scrpt is not None
+
+ a1 = scripts.verify(scrpt,
+ {"stringtest": val},
+ external_check=False)
+ pprint(a1)
+ return a1
+
+ a1 = runtest('stringtest == "balloon"', "balloon")
+ assert len(a1) == 1 and a1[0]['value'] == "echo 'balloon'"
+
+ a1 = runtest('stringtest == "balloon"', "not a balloon")
+ assert len(a1) == 0
+
+ a1 = runtest('stringtest != "balloon"', "not a balloon")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest != "balloon"', "balloon")
+ assert len(a1) == 0
+
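+ # "{{dry_run}}" refers to the built-in dry_run parameter, which defaults
+ # to "no" here, so comparing it against the value "no" matches.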
+ a1 = runtest('stringtest == "{{dry_run}}"', "no")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest == "yes" or stringtest == "no"', "yes")
+ assert len(a1) == 1
diff --git a/test/unittests/test_service_manager.py b/test/unittests/test_service_manager.py
new file mode 100644
index 0000000..082fc3c
--- /dev/null
+++ b/test/unittests/test_service_manager.py
@@ -0,0 +1,84 @@
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.service_manager import ServiceManager
+
+
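+# Patching at class level injects mock_call_with_parallax into every
+# test method of TestServiceManager.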
+@mock.patch("crmsh.service_manager.ServiceManager._call_with_parallax")
+class TestServiceManager(unittest.TestCase):
+ """
+ Unit tests for class ServiceManager
+ """
+
+ def setUp(self) -> None:
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._call = mock.Mock(self.service_manager._call)
+
+ def test_call_single_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 0
+ self.assertEqual(['node1'], self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_single_node_failure(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 1
+ self.assertEqual(list(), self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_multiple_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ mock_call_with_parallax.return_value = {'node1': (0, '', ''), 'node2': (1, 'out', 'err')}
+ self.assertEqual(['node1'], self.service_manager._call(None, ['node1', 'node2'], 'foo'))
+ self.service_manager._run_on_single_host.assert_not_called()
+ mock_call_with_parallax.assert_called_once_with('foo', ['node1', 'node2'])
+
+ def test_run_on_single_host_return_1(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (1, 'bar', 'err')
+ self.assertEqual(1, self.service_manager._run_on_single_host('foo', 'node1'))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_run_on_single_host_return_255(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (255, 'bar', 'err')
+ with self.assertRaises(ValueError):
+ self.service_manager._run_on_single_host('foo', 'node1')
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_start_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl start 'service1'")
+
+ def test_start_service_on_multiple_host(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1', 'node2']
+ self.assertEqual(['node1', 'node2'], self.service_manager.start_service('service1', node_list=['node1', 'node2']))
+ self.service_manager._call.assert_called_once_with(None, ['node1', 'node2'], "systemctl start 'service1'")
+
+ def test_start_and_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', enable=True, remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable --now 'service1'")
+
+ def test_stop_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.stop_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl stop 'service1'")
+
+ def test_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.enable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable 'service1'")
+
+ def test_disable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.disable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl disable 'service1'")
diff --git a/test/unittests/test_sh.py b/test/unittests/test_sh.py
new file mode 100644
index 0000000..b3c0f0b
--- /dev/null
+++ b/test/unittests/test_sh.py
@@ -0,0 +1,189 @@
+import subprocess
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.user_of_host import UserOfHost
+
+
+class TestLocalShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.local_shell = crmsh.sh.LocalShell()
+ self.local_shell.get_effective_user_name = mock.Mock(self.local_shell.get_effective_user_name)
+ self.local_shell.geteuid = mock.Mock(self.local_shell.geteuid)
+ self.local_shell.hostname = mock.Mock(self.local_shell.hostname)
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'alice', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['su', 'alice', '--login', '-s', '/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_as_root(self, mock_run: mock.MagicMock):
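+ # The target user equals the effective user (root), so the command is
+ # executed directly via /bin/sh -c with no su wrapper.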
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_unauthorized(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'bob'
+ self.local_shell.geteuid.return_value = 1001
+ with self.assertRaises(crmsh.sh.AuthorizationError) as ctx:
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ self.assertIsInstance(ctx.exception, ValueError)
+
+ def test_get_stdout_stderr_decoded_and_stripped(self):
+ self.local_shell.get_rc_stdout_stderr_raw = mock.Mock(self.local_shell.get_rc_stdout_stderr_raw)
+ self.local_shell.get_rc_stdout_stderr_raw.return_value = 1, b' out \n', b'\terr\t\n'
+ rc, out, err = self.local_shell.get_rc_stdout_stderr('alice', 'foo', 'input')
+ self.assertEqual(1, rc)
+ self.assertEqual('out', out)
+ self.assertEqual('err', err)
+ self.local_shell.get_rc_stdout_stderr_raw.assert_called_once_with(
+ 'alice', 'foo', b'input',
+ )
+
+ def test_get_stdout_or_raise_error(self):
+ self.local_shell.su_subprocess_run = mock.Mock(self.local_shell.su_subprocess_run)
+ self.local_shell.su_subprocess_run.return_value = subprocess.CompletedProcess(
+ args=mock.Mock(),
+ returncode=1,
+ stdout=b'foo',
+ stderr=b' bar ',
+ )
+ with self.assertRaises(crmsh.sh.CommandFailure) as ctx:
+ self.local_shell.get_stdout_or_raise_error('root', 'foo')
+ self.assertIsInstance(ctx.exception, ValueError)
+
+
+class TestSSHShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.ssh_shell = crmsh.sh.SSHShell(mock.Mock(crmsh.sh.LocalShell), 'alice')
+ self.ssh_shell.local_shell.hostname.return_value = 'node1'
+ self.ssh_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.ssh_shell.local_shell.can_run_as.return_value = True
+
+ def test_can_run_as(self):
+ self.ssh_shell.local_shell.get_rc_and_error.return_value = 255, 'bar'
+ self.assertFalse(self.ssh_shell.can_run_as('node2', 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_not_called()
+
+ def test_can_run_as_local(self):
+ self.assertTrue(self.ssh_shell.can_run_as(None, 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_called_once_with('root')
+
+ def test_subprocess_run_without_input(self):
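+ # The remote command is wrapped in a local su to 'alice', who runs
+ # ssh to bob@node2, as the assertions on su_subprocess_run show.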
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ args, kwargs = self.ssh_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar'
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+
+ @mock.patch('subprocess.run')
+ def test_subprocess_run_without_input_local(self, mock_run):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node1', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ mock_run.assert_called_once_with(
+ ['sudo', '-H', '-u', 'bob', '/bin/sh'],
+ input=b'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+
+class TestClusterShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.cluster_shell = crmsh.sh.ClusterShell(mock.Mock(crmsh.sh.LocalShell), mock.Mock(UserOfHost))
+ self.cluster_shell.local_shell.hostname.return_value = 'node1'
+ self.cluster_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.cluster_shell.local_shell.can_run_as.return_value = True
+ self.cluster_shell.user_of_host.user_pair_for_ssh.return_value = ('alice', 'bob')
+
+ def test_subprocess_run_without_input(self):
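+ # user_pair_for_ssh('node2') yields (alice, bob): su to alice locally,
+ # ssh as bob@node2, and the remote command runs as root ('-u root').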
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.cluster_shell.user_of_host.user_pair_for_ssh.assert_called_once_with('node2')
+ args, kwargs = self.cluster_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertIn('-u root', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar',
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
diff --git a/test/unittests/test_time.py b/test/unittests/test_time.py
new file mode 100644
index 0000000..b252a5d
--- /dev/null
+++ b/test/unittests/test_time.py
@@ -0,0 +1,24 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import utils
+from crmsh import logtime
+import time
+import datetime
+import dateutil.tz
+
+
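+# Both tests build the same reference time (2015-06-01 10:00:00 in the
+# local timezone) and check that crmsh's parsers agree with it.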
+def test_time_convert1():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ dt = utils.parse_time('Jun 01, 2015 10:00:00')
+ assert logtime.human_date(dt) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
+
+
+def test_time_convert2():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ ts = time.localtime(utils.parse_to_timestamp('Jun 01, 2015 10:00:00'))
+ assert time.strftime('%Y-%m-%d %H:%M:%S', ts) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
diff --git a/test/unittests/test_ui_cluster.py b/test/unittests/test_ui_cluster.py
new file mode 100644
index 0000000..a86936f
--- /dev/null
+++ b/test/unittests/test_ui_cluster.py
@@ -0,0 +1,173 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import ui_cluster
+
+logging.basicConfig(level=logging.INFO)
+
+class TestCluster(unittest.TestCase):
+ """
+ Unit tests for class ui_cluster.Cluster
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ui_cluster_inst = ui_cluster.Cluster()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_do_start_already_started(self, mock_qdevice_configured, mock_parse_nodes, mock_active, mock_info):
+ mock_qdevice_configured.return_value = False
+ context_inst = mock.Mock()
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_active.side_effect = [True, True]
+ self.ui_cluster_inst.do_start(context_inst, "node1", "node2")
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node2")
+ ])
+ mock_info.assert_has_calls([
+ mock.call("The cluster stack already started on node1"),
+ mock.call("The cluster stack already started on node2")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_vote')
+ @mock.patch('crmsh.bootstrap.start_pacemaker')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_start(self, mock_parse_nodes, mock_active, mock_start, mock_qdevice_configured, mock_info, mock_start_pacemaker, mock_check_qdevice):
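+ # With qdevice configured, do_start also starts corosync-qdevice on the node.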
+ context_inst = mock.Mock()
+ mock_start_pacemaker.return_value = ["node1"]
+ mock_parse_nodes.return_value = ["node1"]
+ mock_active.side_effect = [False, False]
+ mock_qdevice_configured.return_value = True
+
+ self.ui_cluster_inst.do_start(context_inst, "node1")
+
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("corosync-qdevice.service", remote_addr="node1")
+ ])
+ mock_start.assert_called_once_with("corosync-qdevice", node_list=["node1"])
+ mock_qdevice_configured.assert_called_once_with()
+ mock_info.assert_called_once_with("The cluster stack started on node1")
+
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop_return(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [False, False]
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_dc.assert_not_called()
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc,
+ mock_set_dlm, mock_service_manager, mock_info, mock_debug):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [True, False]
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.stop_service.side_effect = [["node1"], ["node1"], ["node1"]]
+ mock_service_manager_inst.service_is_active.return_value = True
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_debug.assert_called_once_with("stop node list: ['node1']")
+ mock_dc.assert_called_once_with("node1")
+ mock_set_dlm.assert_called_once_with("node1")
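+ # Stop order matters: pacemaker first, then corosync-qdevice, then corosync.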
+ mock_service_manager_inst.stop_service.assert_has_calls([
+ mock.call("pacemaker", node_list=["node1"]),
+ mock.call("corosync-qdevice.service", node_list=["node1"]),
+ mock.call("corosync", node_list=["node1"]),
+ ])
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_corosync(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [False, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_pacemaker(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, True]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is True
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_info.assert_not_called()
diff --git a/test/unittests/test_upgradeuitl.py b/test/unittests/test_upgradeuitl.py
new file mode 100644
index 0000000..56c7284
--- /dev/null
+++ b/test/unittests/test_upgradeuitl.py
@@ -0,0 +1,54 @@
+import os
+import sys
+import unittest
+from unittest import mock
+
+from crmsh import upgradeutil
+
+
+class TestUpgradeCondition(unittest.TestCase):
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_force_upgrade(self, mock_stat: mock.MagicMock, mock_get_file_content):
+ mock_stat.return_value = mock.Mock(os.stat_result)
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_non_existent_seq(
+ self,
+ mock_stat: mock.MagicMock,
+ mock_get_file_content: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
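+ # Patching CURRENT_UPGRADE_SEQ with a MagicMock lets each test control
+ # the outcome of the '>' comparison directly via __gt__.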
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'0.1\n'
+ mock_current_upgrade_seq.__gt__.return_value = True
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_not_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'1.0\n'
+ mock_current_upgrade_seq.__gt__.return_value = False
+ self.assertFalse(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
diff --git a/test/unittests/test_utils.py b/test/unittests/test_utils.py
new file mode 100644
index 0000000..bf06fbd
--- /dev/null
+++ b/test/unittests/test_utils.py
@@ -0,0 +1,1514 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for utils.py
+
+import os
+import socket
+import re
+import imp
+import subprocess
+import unittest
+import pytest
+import logging
+from unittest import mock
+from itertools import chain
+
+import crmsh.utils
+from crmsh import utils, config, tmpfiles, constants, parallax
+
+logging.basicConfig(level=logging.DEBUG)
+
+def setup_function():
+ utils._ip_for_cloud = None
+ # Mock memoize method and reload the module under test later with imp
+ mock.patch('crmsh.utils.memoize', lambda x: x).start()
+ imp.reload(utils)
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_print_cluster_nodes(mock_run):
+ mock_run.return_value = (0, "data", None)
+ utils.print_cluster_nodes()
+ mock_run.assert_called_once_with("crm_node -l")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_package_is_installed_local(mock_run):
+ mock_run.return_value = (0, None)
+ res = utils.package_is_installed("crmsh")
+ assert res is True
+ mock_run.assert_called_once_with("rpm -q --quiet crmsh")
+
+
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included_target_not_exist(mock_detect):
+ mock_detect.side_effect = [True, False]
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is False
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included(mock_detect, mock_run):
+ mock_detect.side_effect = [True, True]
+ mock_run.side_effect = ["data data", "data"]
+
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is True
+
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+ mock_run.assert_has_calls([
+ mock.call("cat file2", host=None),
+ mock.call("cat file1", host=None)
+ ])
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None1(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (1, None)
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ res = utils.get_nodeid_from_name("node1")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_not_called()
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None2(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search.return_value = None
+ res = utils.get_nodeid_from_name("node111")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node111 ', mock_get_stdout.return_value[1], re.M)
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ mock_re_search_inst.group.return_value = '172168231'
+ res = utils.get_nodeid_from_name("node2")
+ assert res == '172168231'
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node2 ', mock_get_stdout.return_value[1], re.M)
+ mock_re_search_inst.group.assert_called_once_with(1)
+
+
+@mock.patch('crmsh.sh.LocalShell.get_rc_and_error')
+def test_check_ssh_passwd_need(mock_run):
+ mock_run.return_value = (1, 'foo')
+ res = utils.check_ssh_passwd_need("bob", "alice", "node1")
+ assert res is True
+ mock_run.assert_called_once_with(
+ "bob",
+ " ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15 -T -o Batchmode=yes alice@node1 true",
+ )
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_cluster_run_cmd_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.cluster_run_cmd("test")
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.list_cluster_nodes_except_me()
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.this_node')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me(mock_list_nodes, mock_this_node):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.list_cluster_nodes_except_me()
+ assert res == ["node2"]
+ mock_list_nodes.assert_called_once_with()
+ mock_this_node.assert_called_once_with()
+
+
+def test_to_ascii():
+ assert utils.to_ascii(None) is None
+ assert utils.to_ascii('test') == 'test'
+ assert utils.to_ascii(b'test') == 'test'
+ # Test not utf-8 characters
+ with mock.patch('traceback.print_exc') as mock_traceback:
+ assert utils.to_ascii(b'te\xe9st') == 'test'
+ mock_traceback.assert_called_once_with()
+
+
+def test_systeminfo():
+ assert utils.getuser() is not None
+ assert utils.gethomedir() is not None
+ assert utils.get_tempdir() is not None
+
+
+def test_shadowcib():
+ assert utils.get_cib_in_use() == ""
+ utils.set_cib_in_use("foo")
+ assert utils.get_cib_in_use() == "foo"
+ utils.clear_cib_in_use()
+ assert utils.get_cib_in_use() == ""
+
+
+def test_booleans():
+ truthy = ['yes', 'Yes', 'True', 'true', 'TRUE',
+ 'YES', 'on', 'On', 'ON']
+ falsy = ['no', 'false', 'off', 'OFF', 'FALSE', 'nO']
+ not_truthy = ['', 'not', 'ONN', 'TRUETH', 'yess']
+ for case in chain(truthy, falsy):
+ assert utils.verify_boolean(case) is True
+ for case in truthy:
+ assert utils.is_boolean_true(case) is True
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is True
+ for case in falsy:
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is True
+ assert utils.get_boolean(case, dflt=True) is False
+ for case in not_truthy:
+ assert utils.verify_boolean(case) is False
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is False
+
+
+def test_olist():
+ lst = utils.olist(['B', 'C', 'A'])
+ lst.append('f')
+ lst.append('aA')
+ lst.append('_')
+ assert 'aa' in lst
+ assert 'a' in lst
+ assert list(lst) == ['b', 'c', 'a', 'f', 'aa', '_']
+
+
+def test_add_sudo():
+ tmpuser = config.core.user
+ try:
+ config.core.user = 'root'
+ assert utils.add_sudo('ls').startswith('sudo')
+ config.core.user = ''
+ assert utils.add_sudo('ls') == 'ls'
+ finally:
+ config.core.user = tmpuser
+
+
+def test_str2tmp():
+ txt = "This is a test string"
+ filename = utils.str2tmp(txt)
+ assert os.path.isfile(filename)
+ assert open(filename).read() == txt + "\n"
+ assert utils.file2str(filename) == txt
+ os.unlink(filename)
+
+
+@mock.patch('logging.Logger.error')
+def test_sanity(mock_error):
+ sane_paths = ['foo/bar', 'foo', '/foo/bar', 'foo0',
+ 'foo_bar', 'foo-bar', '0foo', '.foo',
+ 'foo.bar']
+ insane_paths = ['#foo', 'foo?', 'foo*', 'foo$', 'foo[bar]',
+ 'foo`', "foo'", 'foo/*']
+ for p in sane_paths:
+ assert utils.is_path_sane(p)
+ for p in insane_paths:
+ assert not utils.is_path_sane(p)
+ sane_filenames = ['foo', '0foo', '0', '.foo']
+ insane_filenames = ['foo/bar']
+ for p in sane_filenames:
+ assert utils.is_filename_sane(p)
+ for p in insane_filenames:
+ assert not utils.is_filename_sane(p)
+ sane_names = ['foo']
+ insane_names = ["f'o"]
+ for n in sane_names:
+ assert utils.is_name_sane(n)
+ for n in insane_names:
+ assert not utils.is_name_sane(n)
+
+
+def test_nvpairs2dict():
+ assert utils.nvpairs2dict(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'}
+ assert utils.nvpairs2dict(['a=b=c', 'c=d']) == {'a': 'b=c', 'c': 'd'}
+ assert utils.nvpairs2dict(['a']) == {'a': None}
+
+
+def test_validity():
+ assert utils.is_id_valid('foo0')
+ assert not utils.is_id_valid('0foo')
+
+
+def test_msec():
+ assert utils.crm_msec('1ms') == 1
+ assert utils.crm_msec('1s') == 1000
+ assert utils.crm_msec('1us') == 0
+ assert utils.crm_msec('1') == 1000
+ assert utils.crm_msec('1m') == 60*1000
+ assert utils.crm_msec('1h') == 60*60*1000
+
+
+def test_parse_sysconfig():
+ """
+ bsc#1129317: Fails on this line
+
+ FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+ """
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ sc = utils.parse_sysconfig(fname)
+ assert ("FW_SERVICES_ACCEPT_EXT" in sc)
+
+def test_sysconfig_set():
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, FW_SERVICES_ACCEPT_EXT="foo=bar", FOO="bar")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("FW_SERVICES_ACCEPT_EXT") == "foo=bar")
+ assert (sc.get("FOO") == "bar")
+
+def test_sysconfig_set_bsc1145823():
+ s = '''# this is test
+#age=1000
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, age="100")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("age") == "100")
+
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_false(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = False
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 1
+
+ assert utils.check_port_open("10.10.10.1", 22) is False
+
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+ mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("10.10.10.1", 22))
+
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_true(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = True
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 0
+
+ assert utils.check_port_open("2001:db8:10::7", 22) is True
+
+ mock_is_ipv6.assert_called_once_with("2001:db8:10::7")
+ mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("2001:db8:10::7", 22))
+
+def test_valid_port():
+ assert utils.valid_port(1) is False
+ assert utils.valid_port(10000000) is False
+ assert utils.valid_port(1234) is True
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_false(mock_get_value):
+ mock_get_value.return_value = "ip"
+ assert utils.is_qdevice_configured() is False
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_true(mock_get_value):
+ mock_get_value.return_value = "net"
+ assert utils.is_qdevice_configured() is True
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_false(mock_get_value):
+ mock_get_value.return_value = "off"
+ assert utils.is_qdevice_tls_on() is False
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_true(mock_get_value):
+ mock_get_value.return_value = "on"
+ assert utils.is_qdevice_tls_on() is True
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool_return_none(mock_get_stdout):
+ mock_get_stdout.return_value = (1, None)
+ assert bool(utils.get_nodeinfo_from_cmaptool()) is False
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+
+@mock.patch("re.findall")
+@mock.patch("re.search")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool(mock_get_stdout, mock_search, mock_findall):
+ mock_get_stdout.return_value = (0, 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)\nruntime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ match_inst1 = mock.Mock()
+ match_inst2 = mock.Mock()
+ mock_search.side_effect = [match_inst1, match_inst2]
+ match_inst1.group.return_value = '1'
+ match_inst2.group.return_value = '2'
+ mock_findall.side_effect = [["192.168.43.129"], ["192.168.43.128"]]
+
+ result = utils.get_nodeinfo_from_cmaptool()
+ assert result['1'] == ["192.168.43.129"]
+ assert result['2'] == ["192.168.43.128"]
+
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+ mock_search.assert_has_calls([
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+ match_inst1.group.assert_called_once_with(1)
+ match_inst2.group.assert_called_once_with(1)
+ mock_findall.assert_has_calls([
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false_service_not_active(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = False
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_not_called()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_true(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("2") is True
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_aws() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_xen(mock_run):
+ mock_run.side_effect = ["4.2.amazon", "Xen"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_kvm(mock_run):
+ mock_run.side_effect = ["Not Specified", "Amazon EC2"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_azure() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_microsoft_corporation(mock_run, mock_request):
+ mock_run.side_effect = ["microsoft corporation", "test"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_chassis(mock_run, mock_request):
+ mock_run.side_effect = ["test", "7783-7084-3265-9085-8269-3286-77"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp_false(mock_run):
+ mock_run.return_value = "test"
+ assert utils.detect_gcp() is False
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp(mock_run, mock_request):
+ mock_run.return_value = "Google instance"
+ mock_request.return_value = "data"
+ assert utils.detect_gcp() is True
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_no_cmd(mock_is_program):
+ mock_is_program.return_value = False
+ assert utils.detect_cloud() is None
+ mock_is_program.assert_called_once_with("dmidecode")
+
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_aws(mock_is_program, mock_aws):
+ mock_is_program.return_value = True
+ mock_aws.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AWS
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_azure(mock_is_program, mock_aws, mock_azure):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AZURE
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_gcp")
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_gcp(mock_is_program, mock_aws, mock_azure, mock_gcp):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = False
+ mock_gcp.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_GCP
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+ mock_gcp.assert_called_once_with()
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_interface_choice(mock_get_stdout):
+ ip_a_output = """
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
+ link/ether 52:54:00:9e:1b:4f brd ff:ff:ff:ff:ff:ff
+ inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fe9e:1b4f/64 scope link
+ valid_lft forever preferred_lft forever
+3: br-933fa0e1438c: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
+ link/ether 9e:fe:24:df:59:49 brd ff:ff:ff:ff:ff:ff
+ inet 10.10.10.1/24 brd 10.10.10.255 scope global br-933fa0e1438c
+ valid_lft forever preferred_lft forever
+ inet6 fe80::9cfe:24ff:fedf:5949/64 scope link
+ valid_lft forever preferred_lft forever
+4: veth3fff6e9@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
+ link/ether 1e:2c:b3:73:6b:42 brd ff:ff:ff:ff:ff:ff link-netnsid 0
+ inet6 fe80::1c2c:b3ff:fe73:6b42/64 scope link
+ valid_lft forever preferred_lft forever
+"""
+ mock_get_stdout.return_value = (0, ip_a_output)
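+ # interface_choice() is expected to skip the loopback device and strip the
+ # "@if7" peer suffix, leaving only the plain interface names.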
+ assert utils.interface_choice() == ["enp1s0", "br-933fa0e1438c", "veth3fff6e9"]
+ mock_get_stdout.assert_called_once_with("ip a")
+
+
+class TestIP(unittest.TestCase):
+ """
+ Unit tests for class utils.IP
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ip_inst = utils.IP("10.10.10.1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('ipaddress.ip_address')
+ def test_ip_address(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock()
+ mock_ip_address.return_value = mock_ip_address_inst
+ self.ip_inst.ip_address
+ mock_ip_address.assert_called_once_with("10.10.10.1")
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_version(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(version=4)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.version
+ self.assertEqual(res, mock_ip_address_inst.version)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_mcast(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_multicast=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = utils.IP.is_mcast("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.version', new_callable=mock.PropertyMock)
+ def test_is_ipv6(self, mock_version):
+ mock_version.return_value = 4
+ res = utils.IP.is_ipv6("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_version.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_loopback(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_loopback=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.is_loopback
+ self.assertEqual(res, mock_ip_address_inst.is_loopback)
+ mock_ip_address.assert_called_once_with()
+
+
+class TestInterface(unittest.TestCase):
+ """
+ Unit tests for class utils.Interface
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interface = utils.Interface("10.10.10.123/24")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_ip_with_mask(self):
+ assert self.interface.ip_with_mask == "10.10.10.123/24"
+
+ @mock.patch('ipaddress.ip_interface')
+ def test_ip_interface(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ self.interface.ip_interface
+ mock_ip_interface.assert_called_once_with("10.10.10.123/24")
+
+ @mock.patch('crmsh.utils.Interface.ip_interface', new_callable=mock.PropertyMock)
+ def test_network(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ mock_ip_interface_inst.network = mock.Mock(network_address="10.10.10.0")
+ assert self.interface.network == "10.10.10.0"
+ mock_ip_interface.assert_called_once_with()
+
+
+class TestInterfacesInfo(unittest.TestCase):
+ """
+ Unit tests for class utils.InterfacesInfo
+ """
+
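+ # NB: "ip -4 -o addr show" joins each record onto one line and marks the
+ # folded line breaks with a backslash, hence the "\" in this sample output.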
+ network_output_error = """1: lo inet 127.0.0.1/8 scope host lo\\ valid_lft forever preferred_lft forever
+2: enp1s0 inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+61: tun0 inet 10.163.45.46 peer 10.163.45.45/32 scope global tun0"""
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interfaces_info = utils.InterfacesInfo()
+ self.interfaces_info_with_second_hb = utils.InterfacesInfo(second_heartbeat=True)
+ self.interfaces_info_with_custom_nic = utils.InterfacesInfo(second_heartbeat=True, custom_nic_list=['eth1'])
+ self.interfaces_info_with_wrong_nic = utils.InterfacesInfo(custom_nic_list=['eth7'])
+ self.interfaces_info_fake = utils.InterfacesInfo()
+ self.interfaces_info_fake._nic_info_dict = {
+ "eth0": [mock.Mock(ip="10.10.10.1", network="10.10.10.0"), mock.Mock(ip="10.10.10.2", network="10.10.10.0")],
+ "eth1": [mock.Mock(ip="20.20.20.1", network="20.20.20.0")]
+ }
+ self.interfaces_info_fake._default_nic_list = ["eth7"]
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_no_address(self, mock_run):
+ only_lo = "1: lo inet 127.0.0.1/8 scope host lo\\ valid_lft forever preferred_lft forever"
+ mock_run.return_value = (0, only_lo, None)
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info.get_interfaces_info()
+ self.assertEqual("No address configured", str(err.exception))
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+
+ @mock.patch('crmsh.utils.Interface')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_one_addr(self, mock_run, mock_interface):
+ mock_run.return_value = (0, self.network_output_error, None)
+ mock_interface_inst_1 = mock.Mock(is_loopback=True, is_link_local=False)
+ mock_interface_inst_2 = mock.Mock(is_loopback=False, is_link_local=False)
+ mock_interface.side_effect = [mock_interface_inst_1, mock_interface_inst_2]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_second_hb.get_interfaces_info()
+ self.assertEqual("Cannot configure second heartbeat, since only one address is available", str(err.exception))
+
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+ mock_interface.assert_has_calls([
+ mock.call("127.0.0.1/8"),
+ mock.call("192.168.122.241/24")
+ ])
+
+ def test_nic_list(self):
+ res = self.interfaces_info_fake.nic_list
+ self.assertEqual(res, ["eth0", "eth1"])
+
+ def test_interface_list(self):
+ res = self.interfaces_info_fake.interface_list
+ assert len(res) == 3
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_ip_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(ip="10.10.10.1"),
+ mock.Mock(ip="10.10.10.2")
+ ]
+ res = self.interfaces_info_fake.ip_list
+ self.assertEqual(res, ["10.10.10.1", "10.10.10.2"])
+ mock_interface_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_get_local_ip_list(self, mock_get_info, mock_ip_list):
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.get_local_ip_list(False)
+ self.assertEqual(res, mock_ip_list.return_value)
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_ip_in_local(self, mock_get_info, mock_is_ipv6, mock_ip_list):
+ mock_is_ipv6.return_value = False
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.ip_in_local("10.10.10.1")
+ assert res is True
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_network_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(network="10.10.10.0"),
+ mock.Mock(network="20.20.20.0")
+ ]
+ res = self.interfaces_info.network_list
+ self.assertEqual(res, list(set(["10.10.10.0", "20.20.20.0"])))
+ mock_interface_list.assert_called_once_with()
+
+ def test_nic_first_ip(self):
+ res = self.interfaces_info_fake._nic_first_ip("eth0")
+ self.assertEqual(res, "10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route_no_default(self, mock_run, mock_get_interfaces_info, mock_warn, mock_nic_list):
+ output = """10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51"""
+ mock_run.return_value = (0, output, None)
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth0"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+ mock_warn.assert_called_once_with("No default route configured. Using the first found nic")
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route(self, mock_run):
+ output = """default via 192.168.122.1 dev eth8 proto dhcp
+ 10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51
+ 192.168.122.0/24 dev eth8 proto kernel scope link src 192.168.122.120"""
+ mock_run.return_value = (0, output, None)
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth8"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list_failed_detect(self, mock_nic_list):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_wrong_nic.get_default_ip_list()
+ self.assertEqual("Failed to detect IP address for eth7", str(err.exception))
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.utils.InterfacesInfo._nic_first_ip')
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list(self, mock_nic_list, mock_first_ip):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"], ["eth0", "eth1"]]
+ mock_first_ip.side_effect = ["10.10.10.1", "20.20.20.1"]
+
+ res = self.interfaces_info_with_custom_nic.get_default_ip_list()
+ self.assertEqual(res, ["10.10.10.1", "20.20.20.1"])
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_first_ip.assert_has_calls([mock.call("eth1"), mock.call("eth0")])
+
+
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeid(mock_get_nodeid):
+ mock_get_nodeid.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeinfo(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = {"1": ["10.10.10.1"], "2": ["10.10.10.2"]}
+ res = utils.get_iplist_from_name("test")
+ assert res == ["10.10.10.1"]
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_ping_node(mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with pytest.raises(ValueError) as err:
+ utils.ping_node("node_unreachable")
+ assert str(err.value) == 'host "node_unreachable" is unreachable: error data'
+ mock_run.assert_called_once_with("ping -c 1 node_unreachable")
+
+
+def test_calculate_quorate_status():
+ assert utils.calculate_quorate_status(3, 2) is True
+ assert utils.calculate_quorate_status(3, 1) is False
+
+
+@mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+def test_get_quorum_votes_dict(mock_run):
+ mock_run.return_value = """
+Votequorum information
+----------------------
+Expected votes: 1
+Highest expected: 1
+Total votes: 1
+Quorum: 1
+Flags: Quorate
+ """
+ res = utils.get_quorum_votes_dict()
+ assert res == {'Expected': '1', 'Total': '1'}
+ mock_run.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+def test_re_split_string():
+ assert utils.re_split_string('[; ]', "/dev/sda1; /dev/sdb1 ; ") == ["/dev/sda1", "/dev/sdb1"]
+ assert utils.re_split_string('[; ]', "/dev/sda1 ") == ["/dev/sda1"]
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_has_dev_partitioned(mock_get_dev_info):
+ mock_get_dev_info.return_value = """
+disk
+part
+ """
+ res = utils.has_dev_partitioned("/dev/sda1")
+ assert res is True
+ mock_get_dev_info.assert_called_once_with("/dev/sda1", "NAME", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_local(mock_get_dev_uuid):
+ mock_get_dev_uuid.return_value = ""
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on local"
+ mock_get_dev_uuid.assert_called_once_with("/dev/sdb1")
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_peer(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", ""]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", "5678"]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "UUID of /dev/sdb1 not same with peer node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_used_for_lvm(mock_dev_info):
+ mock_dev_info.return_value = "lvm"
+ res = utils.is_dev_used_for_lvm("/dev/sda1")
+ assert res is True
+ mock_dev_info.assert_called_once_with("/dev/sda1", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_a_plain_raw_disk_or_partition(mock_dev_info):
+ mock_dev_info.return_value = "raid1\nlvm"
+ res = utils.is_dev_a_plain_raw_disk_or_partition("/dev/md127")
+ assert res is False
+ mock_dev_info.assert_called_once_with("/dev/md127", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_dev_info(mock_run):
+ mock_run.return_value = "data"
+ res = utils.get_dev_info("/dev/sda1", "TYPE")
+ assert res == "data"
+ mock_run.assert_called_once_with("lsblk -fno TYPE /dev/sda1", None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_fs_type(mock_get_info):
+ mock_get_info.return_value = "data"
+ res = utils.get_dev_fs_type("/dev/sda1")
+ assert res == "data"
+ mock_get_info.assert_called_once_with("/dev/sda1", "FSTYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_uuid(mock_get_info):
+ mock_get_info.return_value = "uuid"
+ res = utils.get_dev_uuid("/dev/sda1")
+ assert res == "uuid"
+ mock_get_info.assert_called_once_with("/dev/sda1", "UUID", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number_except(mock_run):
+ mock_run.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.get_pe_number("vg1")
+ assert str(err.value) == "Cannot find PE on VG(vg1)"
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number(mock_run):
+ mock_run.return_value = """
+PE Size 4.00 MiB
+Total PE 1534
+Alloc PE / Size 1534 / 5.99 GiB
+ """
+ res = utils.get_pe_number("vg1")
+ assert res == 1534
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_all_vg_name(mock_run):
+ mock_run.return_value = """
+--- Volume group ---
+ VG Name ocfs2-vg
+ System ID
+ """
+ res = utils.get_all_vg_name()
+ assert res == ["ocfs2-vg"]
+ mock_run.assert_called_once_with("vgdisplay")
+
+
+@mock.patch('crmsh.utils.randomword')
+def test_gen_unused_id(mock_rand):
+ mock_rand.return_value = "1234xxxx"
+ res = utils.gen_unused_id(["test-id"], "test-id")
+ assert res == "test-id-1234xxxx"
+ mock_rand.assert_called_once_with(6)
+
+
+@mock.patch('random.choice')
+def test_randomword(mock_rand):
+ import string
+ mock_rand.side_effect = ['z', 'f', 'k', 'e', 'c', 'd']
+ res = utils.randomword()
+ assert res == "zfkecd"
+ mock_rand.assert_has_calls([mock.call(string.ascii_lowercase) for x in range(6)])
+
+
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_all_exist_id(mock_cib):
+ mock_cib.refresh = mock.Mock()
+ mock_cib.id_list = mock.Mock()
+ mock_cib.id_list.return_value = ['1', '2']
+ res = utils.all_exist_id()
+ assert res == ['1', '2']
+ mock_cib.id_list.assert_called_once_with()
+ mock_cib.refresh.assert_called_once_with()
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_mount_point_used(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_mount_point_used("/opt")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_disk_mounted(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_disk_mounted("/dev/vda2")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_stonith_running(mock_run, mock_diskless):
+ mock_run.return_value = """
+stonith-sbd
+1 fence device found
+ """
+ mock_diskless.return_value = True
+ res = utils.has_stonith_running()
+ assert res is True
+ mock_run.assert_called_once_with("stonith_admin -L")
+ mock_diskless.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device_error(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.side_effect = OSError
+ res = utils.is_block_device("/dev/sda1")
+ assert res is False
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.return_value = True
+ res = utils.is_block_device("/dev/sda1")
+ assert res is True
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.ping_node')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_check_all_nodes_reachable(mock_run, mock_ping):
+ mock_run.return_value = "1084783297 15sp2-1 member"
+ utils.check_all_nodes_reachable()
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_ping.assert_called_once_with("15sp2-1")
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_detect_virt(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.detect_virt() is True
+ mock_run.assert_called_once_with("systemd-detect-virt")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_is_standby(mock_run):
+ mock_run.return_value = """
+Node List:
+* Node 15sp2-1: standby
+ """
+ assert utils.is_standby("15sp2-1") is True
+ mock_run.assert_called_once_with("crm_mon -1")
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_dlm_option_dict(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+key1=value1
+key2=value2
+ """
+ res_dict = utils.get_dlm_option_dict()
+ assert res_dict == {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("dlm_tool dump_config", None)
+
+
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option_exception(mock_get_dict):
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ with pytest.raises(ValueError) as err:
+ utils.set_dlm_option(name="xin")
+ assert str(err.value) == '"name" is not dlm config option'
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option(mock_get_dict, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ utils.set_dlm_option(key2="test")
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with('dlm_tool set_config "key2=test"', None)
+
+
+@mock.patch('crmsh.utils.has_resource_configured')
+def test_is_dlm_configured(mock_configured):
+ mock_configured.return_value = True
+ assert utils.is_dlm_configured() is True
+ mock_configured.assert_called_once_with(constants.DLM_CONTROLD_RA, peer=None)
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate_exception(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.is_quorate()
+ assert str(err.value) == "Failed to get quorate status from corosync-quorumtool"
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+Ring ID: 1084783297/440
+Quorate: Yes
+ """
+ assert utils.is_quorate() is True
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none_no_reg(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes(no_reg=True)
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=True)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_cib_not_exist(mock_run, mock_env, mock_isfile):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = False
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+
+
+@mock.patch('crmsh.xmlutil.file2cib_elem')
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes(mock_run, mock_env, mock_isfile, mock_file2elem):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = True
+ mock_cib_inst = mock.Mock()
+ mock_file2elem.return_value = mock_cib_inst
+ mock_node_inst1 = mock.Mock()
+ mock_node_inst2 = mock.Mock()
+ mock_node_inst1.get.side_effect = ["node1", "remote"]
+ mock_node_inst2.get.side_effect = ["node2", "member"]
+ mock_cib_inst.xpath.side_effect = [[mock_node_inst1, mock_node_inst2], "data"]
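+ # node1 is of type "remote" and its server-nvpair lookup returns data, so it
+ # is expected to be filtered out; only the "member" node2 should remain.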
+
+ res = utils.list_cluster_nodes()
+ assert res == ["node2"]
+
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_file2elem.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_cib_inst.xpath.assert_has_calls([
+ mock.call(constants.XML_NODE_PATH),
+ mock.call("//primitive[@id='node1']/instance_attributes/nvpair[@name='server']")
+ ])
+
+
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_property(mock_run, mock_env):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", "")
+ mock_env.return_value = "cib.xml"
+ assert utils.get_property("no-quorum-policy") == "data"
+ mock_run_inst.get_rc_stdout_stderr_without_input.assert_called_once_with(None, "CIB_file=cib.xml sudo --preserve-env=CIB_file crm configure get_property no-quorum-policy")
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property(mock_get, mock_run, mock_warn):
+ mock_get.return_value = "start"
+ utils.set_property("no-quorum-policy", "stop")
+ mock_run.assert_called_once_with("crm configure property no-quorum-policy=stop")
+ mock_warn.assert_called_once_with('"no-quorum-policy" in crm_config is set to stop, it was start')
+
+
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_the_same(mock_get):
+ mock_get.return_value = "value1"
+ utils.set_property("no-quorum-policy", "value1")
+ mock_get.assert_called_once_with("no-quorum-policy", "crm_config")
+
+
+@mock.patch('crmsh.utils.crm_msec')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_conditional(mock_get, mock_msec):
+ mock_get.return_value = "10s"
+ mock_msec.side_effect = ["1000", "1000"]
+ utils.set_property("timeout", "10", conditional=True)
+ mock_get.assert_called_once_with("timeout", "crm_config")
+ mock_msec.assert_has_calls([mock.call("10s"), mock.call("10")])
+
+
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm_return(mock_dlm):
+ mock_dlm.return_value = False
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.get_property')
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm(mock_dlm, mock_get_property, mock_warn):
+ mock_dlm.return_value = True
+ mock_get_property.return_value = "stop"
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+ mock_get_property.assert_called_once_with("no-quorum-policy")
+ mock_warn.assert_called_once_with('The DLM cluster best practice suggests to set the cluster property "no-quorum-policy=freeze"')
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_is_2node_cluster_without_qdevice(mock_list, mock_is_qdevice):
+ mock_list.return_value = ["node1", "node2"]
+ mock_is_qdevice.return_value = False
+ res = utils.is_2node_cluster_without_qdevice()
+ assert res is True
+ mock_list.assert_called_once_with()
+ mock_is_qdevice.assert_called_once_with()
+
+
+def test_get_systemd_timeout_start_in_sec():
+ res = utils.get_systemd_timeout_start_in_sec("1min 31s")
+ assert res == 91
+
+
+@mock.patch('crmsh.utils.is_larger_than_min_version')
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_is_ocf_1_1_cib_schema_detected(mock_cib, mock_larger):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_cib.get_schema = mock.Mock()
+ mock_cib.get_schema.return_value = "pacemaker-3.5"
+ mock_larger.return_value = True
+ assert utils.is_ocf_1_1_cib_schema_detected() is True
+ mock_cib.get_schema.assert_called_once_with()
+ mock_larger.assert_called_once_with("pacemaker-3.5", constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1(mock_support, mock_warn):
+ mock_support.return_value = False
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Master"
+ mock_support.assert_called_once_with()
+ mock_warn.assert_called_once_with('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', "Promoted", "Master", constants.CIB_UPGRADE)
+
+
+@mock.patch('logging.Logger.info')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_convert_new(mock_support, mock_info):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Master") == "Promoted"
+ mock_support.assert_called_once_with()
+ mock_info.assert_called_once_with('Convert deprecated "%s" to "%s"', "Master", "Promoted")
+
+
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_return(mock_support):
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Promoted"
+ mock_support.assert_called_once_with()
+
+
+def test_handle_role_for_ocf_1_1_return_not_role():
+ assert utils.handle_role_for_ocf_1_1("test", name='other') == "test"
+
+
+def test_compatible_role():
+ assert utils.compatible_role("Slave", "Unpromoted") is True
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_fetch_cluster_node_list_from_node(mock_run, mock_warn):
+ mock_run.return_value = """
+
+ 1 node1
+ 2 node2 lost
+ 3 node3 member
+ """
+ assert utils.fetch_cluster_node_list_from_node("node1") == ["node3"]
+ mock_run.assert_called_once_with("crm_node -l", "node1")
+ mock_warn.assert_has_calls([
+ mock.call("The node '%s' has no known name and/or state information", "1"),
+ mock.call("The node '%s'(state '%s') is not a current member", "node2", "lost")
+ ])
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+def test_cluster_copy_file_return(mock_list_nodes):
+ mock_list_nodes.return_value = []
+ assert utils.cluster_copy_file("/file1") == True
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_has_sudo_access(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.has_sudo_access() is True
+ mock_run.assert_called_once_with("sudo -S -k -n id -u")
+
+
+@mock.patch('grp.getgrgid')
+@mock.patch('os.getgroups')
+def test_in_haclient(mock_group, mock_getgrgid):
+ mock_group.return_value = [90, 100]
+ mock_getgrgid_inst1 = mock.Mock(gr_name=constants.HA_GROUP)
+ mock_getgrgid_inst2 = mock.Mock(gr_name="other")
+ mock_getgrgid.side_effect = [mock_getgrgid_inst1, mock_getgrgid_inst2]
+ assert utils.in_haclient() is True
+ mock_group.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_root(mock_user, mock_in):
+ mock_user.return_value = 'root'
+ utils.check_user_access('cluster')
+ mock_in.assert_not_called()
+
+
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_haclient(mock_user, mock_in, mock_sudo):
+ mock_user.return_value = 'user'
+ mock_in.return_value = True
+ utils.check_user_access('ra')
+ mock_sudo.assert_not_called()
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_need_sudo(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = True
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo"')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_acl(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('This command needs higher privilege.\nOption 1) Please consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'\nOption 2) Add "user" to the haclient group. For example:\n sudo usermod -g haclient user')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_cluster(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('cluster')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo".\nCurrently, this command needs to use sudo to escalate itself as root.\nPlease consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'')
diff --git a/test/unittests/test_watchdog.py b/test/unittests/test_watchdog.py
new file mode 100644
index 0000000..957f21f
--- /dev/null
+++ b/test/unittests/test_watchdog.py
@@ -0,0 +1,311 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import watchdog
+from crmsh import bootstrap
+from crmsh import constants
+
+
+class TestWatchdog(unittest.TestCase):
+ """
+ Unit tests for crmsh.watchdog.Watchdog
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.watchdog_inst = watchdog.Watchdog()
+ self.watchdog_join_inst = watchdog.Watchdog(remote_user="alice", peer_host="node1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_watchdog_device_name(self):
+ res = self.watchdog_inst.watchdog_device_name
+ assert res is None
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_ignore_error(self, mock_run):
+ mock_run.return_value = (1, None, "error")
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog", True)
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError) as err:
+ self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ mock_error.assert_called_once_with("Invalid watchdog device /dev/watchdog: error")
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.watchdog.invoke')
+ def test_load_watchdog_driver(self, mock_invoke):
+ self.watchdog_inst._load_watchdog_driver("softdog")
+ mock_invoke.assert_has_calls([
+ mock.call("echo softdog > /etc/modules-load.d/watchdog.conf"),
+ mock.call("systemctl restart systemd-modules-load")
+ ])
+
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ def test_get_watchdog_device_from_sbd_config(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/watchdog"
+ res = self.watchdog_inst._get_watchdog_device_from_sbd_config()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_parse.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_driver_is_loaded(self, mock_run):
+ output = """
+button 24576 0
+softdog 16384 2
+btrfs 1474560 1
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_inst._driver_is_loaded("softdog")
+ assert res is not None
+ mock_run.assert_called_once_with("lsmod")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ self.watchdog_inst._set_watchdog_info()
+ mock_run.assert_called_once_with(watchdog.Watchdog.QUERY_CMD)
+ mock_error.assert_called_once_with("Failed to run {}: error".format(watchdog.Watchdog.QUERY_CMD))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ self.watchdog_inst._set_watchdog_info()
+ self.assertEqual(self.watchdog_inst._watchdog_info_dict, {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'})
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver_none(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = False
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, None)
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = True
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, "/dev/watchdog1")
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ self.watchdog_join_inst._get_driver_through_device_remotely("test")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+ mock_error.assert_called_once_with("Failed to run sudo sbd query-watchdog remotely: error")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_none(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, None)
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, "softdog")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ def test_get_first_unused_device_none(self):
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, None)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_first_unused_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_verify.assert_called_once_with("/dev/watchdog", ignore_error=True)
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input_from_config(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_verify.return_value = True
+ self.watchdog_inst._set_input()
+ mock_first.assert_not_called()
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = None
+ mock_first.return_value = None
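+ # With no device in the sbd config and none discovered, _set_input is
+ # expected to fall back to the "softdog" driver name.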
+ self.watchdog_inst._set_input()
+ self.assertEqual(self.watchdog_inst._input, "softdog")
+ mock_from_config.assert_called_once_with()
+ mock_verify.assert_not_called()
+ mock_first.assert_called_once_with()
+
+ def test_valid_device_false(self):
+ res = self.watchdog_inst._valid_device("test")
+ self.assertEqual(res, False)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_valid_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._valid_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog_error(self, mock_set_info, mock_from_config, mock_error):
+ mock_from_config.return_value = None
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.watchdog_join_inst.join_watchdog()
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_error.assert_called_once_with("Failed to get watchdog device from {}".format(bootstrap.SYSCONFIG_SBD))
+
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._get_driver_through_device_remotely')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog(self, mock_set_info, mock_from_config, mock_valid, mock_get_driver_remotely, mock_load):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_valid.return_value = False
+ mock_get_driver_remotely.return_value = "softdog"
+
+ self.watchdog_join_inst.join_watchdog()
+
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+ mock_get_driver_remotely.assert_called_once_with("/dev/watchdog")
+ mock_load.assert_called_once_with("softdog")
+
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_valid(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc):
+ mock_valid.return_value = True
+ self.watchdog_inst._input = "/dev/watchdog"
+ self.watchdog_inst.init_watchdog()
+ mock_invokerc.assert_not_called()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_error(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_error):
+ mock_valid.return_value = False
+ mock_invokerc.return_value = False
+ self.watchdog_inst._input = "test"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("test")
+ mock_invokerc.assert_called_once_with("modinfo test")
+ mock_error.assert_called_once_with("Should provide valid watchdog device or driver name by -w option")
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_device_through_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._driver_is_loaded')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_is_loaded, mock_load, mock_get_device):
+ mock_valid.return_value = False
+ self.watchdog_inst._input = "softdog"
+ mock_invokerc.return_value = True
+ mock_is_loaded.return_value = False
+ mock_get_device.return_value = "/dev/watchdog"
+
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("softdog")
+ mock_invokerc.assert_called_once_with("modinfo softdog")
+ mock_is_loaded.assert_called_once_with("softdog")
+ mock_load.assert_called_once_with("softdog")
+ mock_set_info.assert_has_calls([mock.call(), mock.call()])
+ mock_get_device.assert_called_once_with("softdog")
diff --git a/test/unittests/test_xmlutil.py b/test/unittests/test_xmlutil.py
new file mode 100644
index 0000000..48393bf
--- /dev/null
+++ b/test/unittests/test_xmlutil.py
@@ -0,0 +1,61 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import xmlutil
+
+
+class TestCrmMonXmlParser(unittest.TestCase):
+ """
+ Unit tests for crmsh.xmlutil.CrmMonXmlParser
+ """
+ @mock.patch('crmsh.sh.cluster_shell')
+ def setUp(self, mock_cluster_shell):
+ """
+ Test setUp.
+ """
+ data = '''
+<data>
+ <nodes>
+ <node name="tbw-1" id="1084783148" online="true" standby="true" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="3" type="member"/>
+ <node name="tbw-2" id="1084783312" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+ </nodes>
+ <resources>
+ <resource id="ocfs2-dlm" resource_agent="ocf::pacemaker:controld" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ <resource id="ocfs2-clusterfs" resource_agent="ocf::heartbeat:Filesystem" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ </resources>
+</data>
+ '''
+ mock_cluster_shell().get_rc_stdout_stderr_without_input.return_value = (0, data, '')
+ self.parser_inst = xmlutil.CrmMonXmlParser()
+
+ def test_is_node_online(self):
+ assert self.parser_inst.is_node_online("tbw-1") is True
+ assert self.parser_inst.is_node_online("tbw-2") is False
+
+ def test_get_node_list(self):
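+ # Note: tbw-2 is online="false" yet returned for "online" -- the filter
+ # appears to select on the standby flag rather than the online flag.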
+ assert self.parser_inst.get_node_list("standby") == ['tbw-1']
+ assert self.parser_inst.get_node_list("online") == ['tbw-2']
+
+ def test_is_resource_configured(self):
+ assert self.parser_inst.is_resource_configured("test") is False
+ assert self.parser_inst.is_resource_configured("ocf::heartbeat:Filesystem") is True
+
+ def test_is_any_resource_running(self):
+ assert self.parser_inst.is_any_resource_running() is True
+
+ def test_is_resource_started(self):
+ assert self.parser_inst.is_resource_started("test") is False
+ assert self.parser_inst.is_resource_started("ocfs2-clusterfs") is True
+ assert self.parser_inst.is_resource_started("ocf::pacemaker:controld") is True
+
+ def test_get_resource_id_list_via_type(self):
+ assert self.parser_inst.get_resource_id_list_via_type("test") == []
+ assert self.parser_inst.get_resource_id_list_via_type("ocf::pacemaker:controld")[0] == "ocfs2-dlm"
diff --git a/test/update-expected-output.sh b/test/update-expected-output.sh
new file mode 100755
index 0000000..496b73d
--- /dev/null
+++ b/test/update-expected-output.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+crmtestout="$1"
+
+[ -d "$crmtestout" ] || { echo "usage: $0 <test-output-dir>"; exit 1; }
+
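+# For each saved .diff from a test run, rewrite the "---"/"+++" header paths
+# into a/<path> b/<path> form pointing at the in-tree testcases, and print
+# the adjusted diff to stdout.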
+for f in "$crmtestout"/*.diff; do
+ fil=$(grep -- '---' "$f" | awk '{print $2}' | sed 's/\/usr\/share\/crmsh\/tests/\/test/g')
+ awk "NR==1{\$2=\"a$fil\"}1" < "$f" | awk "NR==2{\$2=\"b$fil\"}1"
+done
diff --git a/test_container/Dockerfile b/test_container/Dockerfile
new file mode 100644
index 0000000..c099d31
--- /dev/null
+++ b/test_container/Dockerfile
@@ -0,0 +1,28 @@
+FROM opensuse/leap:15.5
+LABEL maintainer="Xin Liang <XLiang@suse.com>"
+
+CMD ["/usr/lib/systemd/systemd", "--system"]
+
+RUN zypper refresh && \
+ zypper -n install systemd \
+ make autoconf automake vim which libxslt-tools mailx iproute2 iputils bzip2 openssh tar file glibc-locale-base firewalld libopenssl1_1 dos2unix iptables \
+ python3 python3-pip python3-lxml python3-python-dateutil python3-setuptools python3-PyYAML python3-curses python3-behave \
+ csync2 libglue-devel corosync corosync-qdevice pacemaker booth corosync-qnetd
+RUN zypper --non-interactive up zypper && \
+ zypper ar -f -G https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15_SP4 repo_nhf && \
+ zypper --non-interactive refresh && \
+ zypper --non-interactive up --allow-vendor-change -y resource-agents libqb100 pacemaker
+
+RUN ssh-keygen -t rsa -f /root/.ssh/id_rsa -N '' && \
+ cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \
+ chmod 0600 /root/.ssh/authorized_keys
+
+RUN python3 -m pip install coverage
+
+RUN mkdir -p /var/log/crmsh
+
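+# Install the socket-activated behave agent; incoming connections on its
+# socket are each served by a fresh behave-agent@.service instance.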
+COPY behave_agent.py /opt
+COPY behave-agent.socket /etc/systemd/system
+COPY behave-agent@.service /etc/systemd/system
+RUN systemctl enable behave-agent.socket
diff --git a/test_container/behave-agent.socket b/test_container/behave-agent.socket
new file mode 100644
index 0000000..1212d30
--- /dev/null
+++ b/test_container/behave-agent.socket
@@ -0,0 +1,9 @@
+[Unit]
+Description=behave test agent
+
+[Socket]
+ListenStream=1122
+Accept=yes
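+# Accept=yes: systemd accepts each connection itself and spawns one
+# behave-agent@.service instance per connection.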
+
+[Install]
+WantedBy=sockets.target
diff --git a/test_container/behave-agent@.service b/test_container/behave-agent@.service
new file mode 100644
index 0000000..eadc420
--- /dev/null
+++ b/test_container/behave-agent@.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=behave test agent
+CollectMode=inactive-or-failed
+
+[Service]
+ExecStart=/opt/behave_agent.py
+StandardInput=socket
+StandardOutput=socket
+StandardError=journal
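+# StandardInput/Output=socket hand the accepted connection to behave_agent.py
+# as stdin/stdout, over which it speaks its framed message protocol.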
diff --git a/test_container/behave_agent.py b/test_container/behave_agent.py
new file mode 100755
index 0000000..49d32d4
--- /dev/null
+++ b/test_container/behave_agent.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+import io
+import os
+import pwd
+import socket
+import struct
+import subprocess
+
+
+MSG_EOF = 0
+MSG_USER = 1
+MSG_CMD = 2
+MSG_OUT = 4
+MSG_ERR = 5
+MSG_RC = 6
+
+
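+# Wire format: every message is a big-endian (type, length) header packed as
+# "!ii", followed by `length` payload bytes; MSG_EOF terminates a stream.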
+class Message:
+ @staticmethod
+ def write(output, type: int, data: bytes):
+ output.write(struct.pack('!ii', type, len(data)))
+ output.write(data)
+
+ @staticmethod
+ def read(input):
+ buf = input.read(8)
+ type, length = struct.unpack('!ii', buf)
+ if length > 0:
+ buf = input.read(length)
+ else:
+ buf = b''
+ return type, buf
+
+
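+# Minimal io.RawIOBase adapter so a socket can be wrapped with
+# io.BufferedReader/io.BufferedWriter for the framed reads and writes below.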
+class SocketIO(io.RawIOBase):
+ def __init__(self, s: socket.socket):
+ self._socket = s
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return True
+
+ def read(self, __size: int = -1) -> bytes:
+ return self._socket.recv(__size)
+
+ def readinto(self, __buffer) -> int:
+ return self._socket.recv_into(__buffer)
+
+ def readall(self) -> bytes:
+ raise NotImplementedError
+
+ def write(self, __b) -> int:
+ return self._socket.send(__b)
+
+
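+# Client side: send the calling user and the command line, half-close the
+# write direction, then collect stdout/stderr chunks and the exit code until
+# MSG_EOF arrives. Hypothetical usage (1122 is the agent's socket port):
+#   rc, out, err = call("node1", 1122, "crm status")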
+def call(host: str, port: int, cmdline: str):
+ family, type, proto, _, sockaddr = socket.getaddrinfo(host, port, type=socket.SOCK_STREAM)[0]
+ with socket.socket(family, type, proto) as s:
+ s.connect(sockaddr)
+ sout = io.BufferedWriter(SocketIO(s), 4096)
+ Message.write(sout, MSG_USER, _getuser().encode('utf-8'))
+ Message.write(sout, MSG_CMD, cmdline.encode('utf-8'))
+ Message.write(sout, MSG_EOF, b'')
+ sout.flush()
+ s.shutdown(socket.SHUT_WR)
+ rc = None
+ stdout = []
+ stderr = []
+ sin = io.BufferedReader(SocketIO(s), 4096)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_OUT:
+ stdout.append(buf)
+ elif type == MSG_ERR:
+ stderr.append(buf)
+ elif type == MSG_RC:
+ rc, = struct.unpack('!i', buf)
+ elif type == MSG_EOF:
+ s.shutdown(socket.SHUT_RD)
+ assert rc is not None
+ return rc, b''.join(stdout), b''.join(stderr)
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+
+
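+# Server side, run by the socket-activated service with the connection on
+# stdin/stdout: read the user and command, execute the command as that user
+# (via /bin/su, or /bin/sh directly for root), and stream back the results.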
+def serve(stdin, stdout, stderr):
+ assert os.geteuid() == 0
+ user = None
+ cmd = None
+ sin = io.BufferedReader(stdin)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_USER:
+ user = buf.decode('utf-8')
+ elif type == MSG_CMD:
+ cmd = buf.decode('utf-8')
+ elif type == MSG_EOF:
+ assert user is not None
+ assert cmd is not None
+ break
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+ if user == 'root':
+ args = ['/bin/sh']
+ else:
+ args = ['/bin/su', '-', user, '-c', '/bin/sh']
+ result = subprocess.run(
+ args,
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ sout = io.BufferedWriter(stdout)
+ Message.write(sout, MSG_RC, struct.pack('!i', result.returncode))
+ Message.write(sout, MSG_OUT, result.stdout)
+ Message.write(sout, MSG_ERR, result.stderr)
+ Message.write(sout, MSG_EOF, b'')
+ stdout.flush()
+
+
+def _getuser():
+ return pwd.getpwuid(os.geteuid()).pw_name
+
+
+if __name__ == '__main__':
+ with open(0, 'rb') as stdin, \
+ open(1, 'wb') as stdout, \
+ open(2, 'wb') as stderr:
+ serve(stdin, stdout, stderr)