Diffstat (limited to 'test/testcases')
-rw-r--r--  test/testcases/acl                   |   60
-rw-r--r--  test/testcases/acl.excl              |    1
-rw-r--r--  test/testcases/acl.exp               |   94
-rw-r--r--  test/testcases/basicset              |   18
-rw-r--r--  test/testcases/bugs                  |   79
-rw-r--r--  test/testcases/bugs.exp              |  215
-rw-r--r--  test/testcases/bundle                |   20
-rw-r--r--  test/testcases/bundle.exp            |   57
-rw-r--r--  test/testcases/commit                |   39
-rw-r--r--  test/testcases/commit.exp            |   90
-rw-r--r--  test/testcases/common.excl           |   26
-rwxr-xr-x  test/testcases/common.filter         |    9
-rw-r--r--  test/testcases/confbasic             |   91
-rw-r--r--  test/testcases/confbasic-xml         |   72
-rw-r--r--  test/testcases/confbasic-xml.exp     |  206
-rwxr-xr-x  test/testcases/confbasic-xml.filter  |    2
-rw-r--r--  test/testcases/confbasic.exp         |  199
-rw-r--r--  test/testcases/delete                |   64
-rw-r--r--  test/testcases/delete.exp            |  194
-rw-r--r--  test/testcases/edit                  |   95
-rw-r--r--  test/testcases/edit.excl             |    1
-rw-r--r--  test/testcases/edit.exp              |  437
-rw-r--r--  test/testcases/file                  |   14
-rw-r--r--  test/testcases/file.exp              |   77
-rw-r--r--  test/testcases/history               |   42
-rw-r--r--  test/testcases/history.excl          |    3
-rw-r--r--  test/testcases/history.exp           |  600
-rwxr-xr-x  test/testcases/history.post          |    3
-rwxr-xr-x  test/testcases/history.pre           |    3
-rw-r--r--  test/testcases/newfeatures           |   44
-rw-r--r--  test/testcases/newfeatures.exp       |   81
-rw-r--r--  test/testcases/node                  |   14
-rw-r--r--  test/testcases/node.exp              |  204
-rw-r--r--  test/testcases/options               |   23
-rw-r--r--  test/testcases/options.exp           |   64
-rw-r--r--  test/testcases/ra                    |    7
-rw-r--r--  test/testcases/ra.exp                |  150
-rwxr-xr-x  test/testcases/ra.filter             |   17
-rw-r--r--  test/testcases/resource              |   84
-rw-r--r--  test/testcases/resource.exp          | 1450
-rw-r--r--  test/testcases/rset                  |   21
-rw-r--r--  test/testcases/rset-xml              |   19
-rw-r--r--  test/testcases/rset-xml.exp          |   53
-rw-r--r--  test/testcases/rset.exp              |   66
-rw-r--r--  test/testcases/scripts               |   14
-rw-r--r--  test/testcases/scripts.exp           |  305
-rwxr-xr-x  test/testcases/scripts.filter        |    4
-rw-r--r--  test/testcases/shadow                |   10
-rw-r--r--  test/testcases/shadow.exp            |   24
-rwxr-xr-x  test/testcases/xmlonly.sh            |    5
50 files changed, 5470 insertions(+), 0 deletions(-)
diff --git a/test/testcases/acl b/test/testcases/acl
new file mode 100644
index 0000000..ebc9531
--- /dev/null
+++ b/test/testcases/acl
@@ -0,0 +1,60 @@
+show ACL
+node node1
+property enable-acl=true
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d0 ocf:pacemaker:Dummy
+primitive d1 ocf:pacemaker:Dummy
+role basic-read \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+role basic-read-basic \
+ read cib
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny tag:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+acl_target cyrus cyrus-role
+_test
+verify
+.
diff --git a/test/testcases/acl.excl b/test/testcases/acl.excl
new file mode 100644
index 0000000..31d13f7
--- /dev/null
+++ b/test/testcases/acl.excl
@@ -0,0 +1 @@
+INFO: 5: already using schema pacemaker-1.2
diff --git a/test/testcases/acl.exp b/test/testcases/acl.exp
new file mode 100644
index 0000000..f00405c
--- /dev/null
+++ b/test/testcases/acl.exp
@@ -0,0 +1,94 @@
+.TRY ACL
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: property enable-acl=true
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d0 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: role basic-read read status read type:node attribute:uname read type:node attribute:type read property
+.INP: role basic-read-basic read cib
+.INP: role d0-admin write meta:d0:target-role write meta:d0:is-managed read ref:d0
+.INP: role silly-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read utilization:d0 read property:stonith-enabled write property read node read node:node1 read nodeattr read nodeattr:a1 read nodeutil read nodeutil:node1 read status read cib
+.INP: role silly-role-two read xpath:"//nodes//attributes" deny tag:nvpair deny ref:d0
+.INP: acl_target alice basic-read-basic
+.INP: acl_target bob d0-admin basic-read-basic
+.INP: role cyrus-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read property
+.INP: acl_target cyrus cyrus-role
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1
+primitive d0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ enable-acl=true
+role basic-read \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role basic-read-basic \
+ read cib
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny type:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+acl_target cyrus \
+ cyrus-role
+.INP: commit
diff --git a/test/testcases/basicset b/test/testcases/basicset
new file mode 100644
index 0000000..4f023bf
--- /dev/null
+++ b/test/testcases/basicset
@@ -0,0 +1,18 @@
+confbasic
+bundle
+confbasic-xml
+edit
+rset
+rset-xml
+delete
+node
+resource
+file
+shadow
+ra
+acl
+history
+newfeatures
+commit
+bugs
+scripts
diff --git a/test/testcases/bugs b/test/testcases/bugs
new file mode 100644
index 0000000..28219ae
--- /dev/null
+++ b/test/testcases/bugs
@@ -0,0 +1,79 @@
+session Configuration bugs
+options
+sort_elements false
+up
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p4 Dummy
+primitive p3 Dummy
+primitive p2 Dummy
+primitive p1 Dummy
+colocation c1 inf: p1 p2
+filter "sed 's/p1 p2/& p3/'" c1
+show c1
+delete c1
+colocation c2 inf: [ p1 p2 ] p3 p4
+filter "sed 's/\\\[/\\\(/;s/\\\]/\\\)/'" c2
+show c2
+primitive p5 Dummy
+primitive p6 Dummy
+clone cl-p5 p5
+show
+commit
+_test
+verify
+show
+.
+session Unordered load file
+options
+sort_elements false
+up
+configure
+load update bugs-test.txt
+show
+commit
+_test
+verify
+.
+session Unknown properties
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+show
+commit
+_test
+verify
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+show
+commit
+_test
+verify
+.
+session template
+configure
+erase
+node node1
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+template
+new vip virtual-ip params ip=10.10.10.123
+load vip
+apply update
+up
+commit
+_test
+verify
+.
diff --git a/test/testcases/bugs.exp b/test/testcases/bugs.exp
new file mode 100644
index 0000000..af05e82
--- /dev/null
+++ b/test/testcases/bugs.exp
@@ -0,0 +1,215 @@
+.TRY Configuration bugs
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: erase
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p4 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p3 Dummy
+.INP: primitive p2 Dummy
+.INP: primitive p1 Dummy
+.INP: colocation c1 inf: p1 p2
+.INP: filter "sed 's/p1 p2/& p3/'" c1
+.INP: show c1
+colocation c1 inf: p1 p2 p3
+.INP: delete c1
+.INP: colocation c2 inf: [ p1 p2 ] p3 p4
+.INP: filter "sed 's/\[/\(/;s/\]/\)/'" c2
+.INP: show c2
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: primitive p5 Dummy
+.INP: primitive p6 Dummy
+.INP: clone cl-p5 p5
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.TRY Unordered load file
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: load update bugs-test.txt
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive gr1 Dummy
+primitive gr2 Dummy
+primitive gr3 Dummy
+primitive gr4 Dummy
+group g1 gr1 gr2
+group g2 gr3
+group g3 gr4
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+location loc1 g1 \
+ rule 200: #uname eq node1
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: _test
+.INP: verify
+.TRY Unknown properties
+.INP: configure
+.INP: erase
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: modified location:loc1 from g1 to gr2
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: property SAPHanaSR: hana_ha1_site_lss_WDF1=4
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: property SAPHanaSR_2: hana_ha1_site_iss_WDF1=cde hana_ha1_site_bss_WDF1=abc
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+.INP: commit
+.INP: _test
+.INP: verify
+.TRY template
+.INP: configure
+.INP: erase
+.INP: node node1
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: template
+.INP: new vip virtual-ip params ip=10.10.10.123
+INFO: 6: pulling in template virtual-ip
+.INP: load vip
+.INP: apply update
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr
+.EXT crm_resource --list-ocf-alternatives IPaddr
+.INP: up
+.INP: commit
+.INP: _test
+.INP: verify
diff --git a/test/testcases/bundle b/test/testcases/bundle
new file mode 100644
index 0000000..463687d
--- /dev/null
+++ b/test/testcases/bundle
@@ -0,0 +1,20 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+property stonith-enabled=true
+_test
+verify
+.
diff --git a/test/testcases/bundle.exp b/test/testcases/bundle.exp
new file mode 100644
index 0000000..f6284ce
--- /dev/null
+++ b/test/testcases/bundle.exp
@@ -0,0 +1,57 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+.INP: primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+.INP: property stonith-enabled=true
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive dummy Dummy \
+ meta target-role=Stopped \
+ op monitor interval=10 timeout=20s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+bundle bundle-test1 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 \
+ storage storage-mapping id=storage1 target-dir=test source-dir=test \
+ meta target-role=Stopped
+bundle bundle-test2 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 \
+ primitive dummy \
+ meta target-role=Stopped priority=1
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: commit
diff --git a/test/testcases/commit b/test/testcases/commit
new file mode 100644
index 0000000..67b27c3
--- /dev/null
+++ b/test/testcases/commit
@@ -0,0 +1,39 @@
+show Commits of all kinds
+op_defaults timeout=2m
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m
+commit
+node node1 \
+ attributes mem=16G
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+primitive p2 ocf:heartbeat:Dummy
+primitive p3 ocf:heartbeat:Dummy
+group g1 p1 p2
+clone c1 g1
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+commit
+rename p3 pp3
+commit
+rename pp3 p3
+delete c1
+commit
+group g2 d1 d2
+commit
+delete g2
+commit
+filter "sed '/g1/s/p1/d1/'"
+group g2 d3 d2
+delete d2
+commit
+_test
+verify
+.
diff --git a/test/testcases/commit.exp b/test/testcases/commit.exp
new file mode 100644
index 0000000..59d291c
--- /dev/null
+++ b/test/testcases/commit.exp
@@ -0,0 +1,90 @@
+.TRY Commits of all kinds
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: primitive st stonith:null params hostlist='node1' meta yoyo-meta="yoyo 2" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: commit
+WARNING: 7: st: unknown attribute 'yoyo-meta'
+.INP: node node1 attributes mem=16G
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p2 ocf:heartbeat:Dummy
+.INP: primitive p3 ocf:heartbeat:Dummy
+.INP: group g1 p1 p2
+.INP: clone c1 g1
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: commit
+.INP: rename p3 pp3
+INFO: 21: modified location:l1 from p3 to pp3
+INFO: 21: modified order:o1 from p3 to pp3
+INFO: 21: modified colocation:cl1 from p3 to pp3
+.INP: commit
+.INP: rename pp3 p3
+INFO: 23: modified location:l1 from pp3 to p3
+INFO: 23: modified order:o1 from pp3 to p3
+INFO: 23: modified colocation:cl1 from pp3 to p3
+.INP: delete c1
+INFO: 24: modified order:o1 from c1 to g1
+INFO: 24: modified colocation:cl1 from c1 to g1
+.INP: commit
+.INP: group g2 d1 d2
+.INP: commit
+.INP: delete g2
+.INP: commit
+.INP: filter "sed '/g1/s/p1/d1/'"
+.INP: group g2 d3 d2
+.INP: delete d2
+.INP: commit
+.INP: _test
+.INP: verify
+WARNING: 35: st: unknown attribute 'yoyo-meta'
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 p2
+group g2 d3
+colocation cl1 inf: g1 p3
+location l1 p3 100: node1
+order o1 Mandatory: p3 g1
+op_defaults op-options: \
+ timeout=2m
+.INP: commit
+INFO: 37: apparently there is nothing to commit
+INFO: 37: try changing something first
diff --git a/test/testcases/common.excl b/test/testcases/common.excl
new file mode 100644
index 0000000..4902553
--- /dev/null
+++ b/test/testcases/common.excl
@@ -0,0 +1,26 @@
+Could not send fail-count-p0=\(null\) update via attrd: connection failed
+Could not send fail-count-p0=<none> update via attrd: connection failed
+Could not send s1=\(null\) update via attrd: connection failed
+Could not send s1=<none> update via attrd: connection failed
+Error performing operation: The object/attribute does not exist
+Error setting fail-count-p0=5 \(section=status, set=status-node1\): The object/attribute does not exist
+Error setting s1=1 2 3 \(section=status, set=status-node1\): The object/attribute does not exist
+Error signing on to the CRMd service
+Error connecting to the controller
+Error performing operation: Transport endpoint is not connected
+Error performing operation: Not connected
+.EXT crm_resource --list-ocf-providers
+.EXT crm_resource --list-ocf-alternatives Delay
+.EXT crm_resource --list-ocf-alternatives Dummy
+^\.EXT crmd version
+^\.EXT cibadmin \-Ql
+^\.EXT crm_verify \-VV \-p
+^\.EXT cibadmin \-p \-P
+^\.EXT crm_diff \-\-help
+^\.EXT crm_diff \-o [^ ]+ \-n \-
+^\.EXT crm_diff \-\-no\-version \-o [^ ]+ \-n \-
+^\.EXT sed ['][^']+
+^\.EXT sed ["][^"]+
+^\.EXT [a-zA-Z]+ validate-all
+^[ ]+File ["][^"]+
+^.*\: ([0-9]+\: )?\(cluster\_status\) warning\: Fencing and resource management disabled due to lack of quorum
diff --git a/test/testcases/common.filter b/test/testcases/common.filter
new file mode 100755
index 0000000..03846c2
--- /dev/null
+++ b/test/testcases/common.filter
@@ -0,0 +1,9 @@
+#!/usr/bin/awk -f
+# 1. replace .EXT [path/]<cmd> <parameter> with .EXT <cmd> <parameter>
+/\.EXT \/(.+)/ { gsub(/\/.*\//, "", $2) }
+/\.EXT >\/dev\/null 2>&1 \/(.+)/ { gsub(/\/.*\//, "", $4) }
+/\.EXT pacemaker-fenced/ { gsub(/pacemaker-fenced/,"stonithd") }
+/\.EXT pacemaker-controld/ { gsub(/pacemaker-controld/,"crmd") }
+/\.EXT pacemaker-schedulerd/ { gsub(/pacemaker-schedulerd/,"pengine") }
+/\.EXT pacemaker-based/ { gsub(/pacemaker-based/,"cib") }
+{ print }
diff --git a/test/testcases/confbasic b/test/testcases/confbasic
new file mode 100644
index 0000000..b06016b
--- /dev/null
+++ b/test/testcases/confbasic
@@ -0,0 +1,91 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+primitive d7 Dummy \
+ params rule inf: #uname eq node1 fake=1 \
+ params rule inf: #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m
+primitive d8 ocf:pacemaker:Dummy
+clone m7 d8 \
+ meta promotable=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date spec years="2009" hours="09-17"
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+tag t1: m5 m6
+set d2.mondelay 45
+_test
+verify
+.
+-F node maintenance node1
+-F resource maintenance g1 off
+-F resource maintenance d1
+-F configure property maintenance-mode=true
diff --git a/test/testcases/confbasic-xml b/test/testcases/confbasic-xml
new file mode 100644
index 0000000..58433f5
--- /dev/null
+++ b/test/testcases/confbasic-xml
@@ -0,0 +1,72 @@
+showxml Basic configure (xml dump)
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt 2009-05-26 and \
+ date in start=2009-05-26 end=2009-07-26 and \
+ date in start=2009-05-26 years=2009 and \
+ date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 record-pending=true
+_test
+verify
+.
diff --git a/test/testcases/confbasic-xml.exp b/test/testcases/confbasic-xml.exp
new file mode 100644
index 0000000..20892dc
--- /dev/null
+++ b/test/testcases/confbasic-xml.exp
@@ -0,0 +1,206 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="true" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ <cluster_property_set id="cpset2">
+ <nvpair name="maintenance-mode" value="true" id="cpset2-maintenance-mode"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="node1-instance_attributes">
+ <nvpair name="mem" value="16G" id="node1-instance_attributes-mem"/>
+ </instance_attributes>
+ </node>
+ <node uname="node2" id="node2">
+ <utilization id="node2-utilization">
+ <nvpair name="cpu" value="4" id="node2-utilization-cpu"/>
+ </utilization>
+ </node>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <meta_attributes id="st-meta_attributes">
+ <nvpair name="target-role" value="Started" id="st-meta_attributes-target-role"/>
+ <nvpair name="requires" value="nothing" id="st-meta_attributes-requires"/>
+ </meta_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" interval="60m" timeout="60s" id="st-monitor-60m"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="st2" class="stonith" type="ssh">
+ <instance_attributes id="st2-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st2-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="20" interval="3600" id="st2-monitor-3600"/>
+ <op name="start" timeout="20" interval="0s" id="st2-start-0s"/>
+ <op name="stop" timeout="15" interval="0s" id="st2-stop-0s"/>
+ </operations>
+ </primitive>
+ <group id="g1">
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations id="d1-ops">
+ <op name="monitor" interval="60m" timeout="20s" id="d1-ops-monitor-60m"/>
+ <op name="monitor" interval="120m" timeout="20s" id="d1-ops-monitor-120m">
+ <instance_attributes id="d1-ops-monitor-120m-instance_attributes">
+ <nvpair name="OCF_CHECK_LEVEL" value="10" id="d1-ops-monitor-120m-instance_attributes-OCF_CHECK_LEVEL"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="d1-ops-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-ops-stop-0s"/>
+ <op name="monitor" interval="60s" timeout="30s" id="d1-monitor-60s"/>
+ </operations>
+ </primitive>
+ <primitive id="d2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="d2-instance_attributes">
+ <nvpair name="mondelay" value="60" id="d2-instance_attributes-mondelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="d2-start-0s"/>
+ <op name="stop" timeout="60s" interval="0s" id="d2-stop-0s"/>
+ <op name="monitor" timeout="30s" interval="10s" id="d2-monitor-10s"/>
+ <op name="monitor" role="Started" interval="60s" timeout="30s" id="d2-monitor-60s"/>
+ </operations>
+ </primitive>
+ </group>
+ <clone id="c">
+ <meta_attributes id="c-meta_attributes">
+ <nvpair name="clone-max" value="1" id="c-meta_attributes-clone-max"/>
+ <nvpair name="interleave" value="true" id="c-meta_attributes-interleave"/>
+ </meta_attributes>
+ <primitive id="d3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ <master id="m">
+ <primitive id="d4" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d4-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d4-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d4-stop-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m5">
+ <primitive id="s5" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s5-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s5-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s5-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s5-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s5-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m6">
+ <primitive id="s6" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s6-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s6-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s6-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s6-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s6-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s6-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+ <constraints>
+ <rsc_location id="l1" rsc="g1" score="100" node="node1"/>
+ <rsc_location id="l2" rsc="c">
+ <rule id="l2-rule1" score="100">
+ <expression operation="eq" attribute="#uname" value="node1" id="l2-rule1-expression"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l3" rsc="m5">
+ <rule score="INFINITY" id="l3-rule">
+ <expression operation="eq" attribute="#uname" value="node1" id="l3-rule-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l3-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l4" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l4-rule">
+ <expression operation="not_defined" attribute="pingd" id="l4-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l4-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l5" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l5-rule">
+ <expression operation="not_defined" attribute="pingd" id="l5-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l5-rule-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-0">
+ <expression operation="eq" attribute="#uname" value="node1" id="l5-rule-0-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l5-rule-0-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-1">
+ <date_expression operation="lt" end="2009-05-26" id="l5-rule-1-expression"/>
+ <date_expression operation="in_range" start="2009-05-26" end="2009-07-26" id="l5-rule-1-expression-0"/>
+ <date_expression operation="in_range" start="2009-05-26" id="l5-rule-1-expression-1">
+ <duration years="2009" id="l5-rule-1-expression-1-duration"/>
+ </date_expression>
+ <date_expression operation="date_spec" id="l5-rule-1-expression-2">
+ <date_spec years="2009" hours="09-17" id="l5-rule-1-expression-2-date_spec"/>
+ </date_expression>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l6" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_location id="l7" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>
+ <rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>
+ <rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>
+ <rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>
+ <rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>
+ <rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence">
+ <resource_set id="ticket-B_m6_m5-0">
+ <resource_ref id="m6"/>
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ <rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence">
+ <resource_set id="ticket-C_master-0">
+ <resource_ref id="m6"/>
+ </resource_set>
+ <resource_set role="Master" id="ticket-C_master-1">
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ </constraints>
+ <fencing-topology>
+ <fencing-level target="node1" index="1" devices="st" id="fencing"/>
+ <fencing-level target="node1" index="2" devices="st2" id="fencing-0"/>
+ <fencing-level target="node2" index="1" devices="st" id="fencing-1"/>
+ <fencing-level target="node2" index="2" devices="st2" id="fencing-2"/>
+ </fencing-topology>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="failure-timeout" value="10m" id="rsc-options-failure-timeout"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="opsdef2">
+ <nvpair name="record-pending" value="true" id="opsdef2-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+</cib>
diff --git a/test/testcases/confbasic-xml.filter b/test/testcases/confbasic-xml.filter
new file mode 100755
index 0000000..7b677da
--- /dev/null
+++ b/test/testcases/confbasic-xml.filter
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -v "WARNING"
diff --git a/test/testcases/confbasic.exp b/test/testcases/confbasic.exp
new file mode 100644
index 0000000..5fc2dff
--- /dev/null
+++ b/test/testcases/confbasic.exp
@@ -0,0 +1,199 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: primitive d1 ocf:pacemaker:Dummy operations $id=d1-ops op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: monitor d1 60s:30s
+.INP: primitive d2 ocf:heartbeat:Delay params mondelay=60 op start timeout=60s op stop timeout=60s
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: monitor d2:Started 60s:30s
+.INP: group g1 d1 d2
+.INP: primitive d3 ocf:pacemaker:Dummy
+.INP: clone c d3 meta clone-max=1
+.INP: primitive d4 ocf:pacemaker:Dummy
+.INP: ms m d4
+WARNING: 19: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: delete m
+.INP: master m d4
+WARNING: 21: This command 'master' is deprecated, please use 'ms'
+INFO: 21: "master" is accepted as "ms"
+WARNING: 21: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: primitive s5 ocf:pacemaker:Stateful operations $id-ref=d1-ops
+.EXT crm_resource --show-metadata ocf:pacemaker:Stateful
+.INP: primitive s6 ocf:pacemaker:Stateful operations $id-ref=d1
+.INP: ms m5 s5
+WARNING: 24: "ms" is deprecated. Please use "clone m5 s5 meta promotable=true"
+.INP: ms m6 s6
+WARNING: 25: "ms" is deprecated. Please use "clone m6 s6 meta promotable=true"
+.INP: primitive d7 Dummy params rule inf: #uname eq node1 fake=1 params rule inf: #uname eq node2 fake=2 op start interval=0 timeout=60s op_params 2: rule #uname eq node1 op_param=dummy op_params 1: op_param=smart op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m op_meta 1: start-delay=60m
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d8 ocf:pacemaker:Dummy
+.INP: clone m7 d8 meta promotable=true meta promoted-max=1 meta promoted-node-max=1
+.INP: location l1 g1 100: node1
+.INP: location l2 c rule $id=l2-rule1 100: #uname eq node1
+.INP: location l3 m5 rule inf: #uname eq node1 and pingd gt 0
+.INP: location l4 m5 rule -inf: not_defined pingd or pingd lte 0
+.INP: location l5 m5 rule -inf: not_defined pingd or pingd lte 0 rule inf: #uname eq node1 and pingd gt 0 rule inf: date lt "2009-05-26" and date in start="2009-05-26" end="2009-07-26" and date in start="2009-05-26" years="2009" and date spec years="2009" hours="09-17"
+.INP: location l6 m5 rule $id-ref=l2-rule1
+.INP: location l7 m5 rule $id-ref=l2
+.INP: collocation c1 inf: m6 m5
+WARNING: 36: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 36: "collocation" is accepted as "colocation"
+.INP: collocation c2 inf: m5:Master d1:Started
+WARNING: 37: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 37: "collocation" is accepted as "colocation"
+.INP: order o1 Mandatory: m5 m6
+.INP: order o2 Optional: d1:start m5:promote
+.INP: order o3 Serialize: m5 m6
+.INP: order o4 Mandatory: m5 m6
+.INP: rsc_ticket ticket-A_m6 ticket-A: m6
+.INP: rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+.INP: rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+.INP: fencing_topology st st2
+.INP: property stonith-enabled=true
+.INP: property $id=cpset2 maintenance-mode=true
+.INP: rsc_defaults failure-timeout=10m
+.INP: op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+.INP: tag t1: m5 m6
+.INP: set d2.mondelay 45
+.INP: _test
+.INP: verify
+WARNING: 53: c2: resource d1 is grouped, constraints should apply to the group
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op monitor interval=60s timeout=30s
+primitive d2 Delay \
+ params mondelay=45 \
+ op start timeout=60s interval=0s \
+ op stop timeout=60s interval=0s \
+ op monitor timeout=30s interval=10s \
+ op monitor role=Started interval=60s timeout=30s
+primitive d3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d7 Dummy \
+ params rule #uname eq node1 fake=1 \
+ params rule #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m \
+ op monitor timeout=20s interval=10s \
+ op stop timeout=20s interval=0s
+primitive d8 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 d2
+ms m d4
+ms m5 s5
+ms m6 s6
+clone c d3 \
+ meta clone-max=1 interleave=true
+clone m7 d8 \
+ meta promotable=true interleave=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+tag t1 m5 m6
+colocation c1 inf: m6 m5
+colocation c2 inf: m5:Master d1:Started
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule #uname eq node1 and pingd gt 0 \
+ rule date lt 2009-05-26 and date in start=2009-05-26 end=2009-07-26 and date in start=2009-05-26 years=2009 and date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2-rule1
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+fencing_topology st st2
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+property cib-bootstrap-options: \
+ stonith-enabled=true
+property cpset2: \
+ maintenance-mode=true
+rsc_defaults rsc-options: \
+ failure-timeout=10m
+op_defaults opsdef2: \
+ rule 100: #uname eq node1 \
+ record-pending=true
+.INP: commit
+WARNING: 55: c2: resource d1 is grouped, constraints should apply to the group
+.TRY -F node maintenance node1
+.TRY -F resource maintenance g1 off
+.TRY -F resource maintenance d1
+.TRY -F configure property maintenance-mode=true
+INFO: 'maintenance' attribute already exists in d1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in node1. Remove it? [YES]
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
diff --git a/test/testcases/delete b/test/testcases/delete
new file mode 100644
index 0000000..7d0dc57
--- /dev/null
+++ b/test/testcases/delete
@@ -0,0 +1,64 @@
+session Delete/Rename test
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+show
+_test
+rename d1 p1
+show
+# delete primitive
+delete d2
+_test
+show
+# delete primitive with constraint
+delete p1
+_test
+show
+primitive d1 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+_test
+# delete primitive belonging to a group
+primitive d2 ocf:pacemaker:Dummy
+_test
+group g1 d2 d1
+delete d2
+show
+_test
+delete g1
+show
+verify
+# delete a group which is in a clone
+primitive d2 ocf:pacemaker:Dummy
+group g1 d2 d1
+clone c1 g1
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+_test
+# delete group from a clone (again)
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+# delete primitive and its group and their clone
+delete d2 d1 c1 g1
+show
+_test
+# verify
+verify
+commit
+.
diff --git a/test/testcases/delete.exp b/test/testcases/delete.exp
new file mode 100644
index 0000000..87b1a7a
--- /dev/null
+++ b/test/testcases/delete.exp
@@ -0,0 +1,194 @@
+.TRY Delete/Rename test
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: _test
+.INP: rename d1 p1
+INFO: 13: modified location:d1-pref from d1 to p1
+.INP: show
+node node1
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive
+.INP: delete d2
+.INP: _test
+.INP: show
+node node1
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive with constraint
+.INP: delete p1
+INFO: 20: hanging location:d1-pref deleted
+.INP: _test
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: _test
+.INP: # delete primitive belonging to a group
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 29: modified location:d1-pref from d1 to g1
+.INP: delete d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+group g1 d1
+location d1-pref g1 100: node1
+.INP: _test
+.INP: delete g1
+INFO: 33: modified location:d1-pref from g1 to d1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: verify
+.INP: # delete a group which is in a clone
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: group g1 d2 d1
+INFO: 38: modified location:d1-pref from d1 to g1
+.INP: clone c1 g1
+INFO: 39: modified location:d1-pref from g1 to c1
+.INP: delete g1
+INFO: 40: modified location:d1-pref from c1 to g1
+INFO: 40: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 43: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 44: modified location:d1-pref from g1 to c1
+.INP: _test
+.INP: # delete group from a clone (again)
+.INP: delete g1
+INFO: 47: modified location:d1-pref from c1 to g1
+INFO: 47: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 50: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 51: modified location:d1-pref from g1 to c1
+.INP: # delete primitive and its group and their clone
+.INP: delete d2 d1 c1 g1
+INFO: 53: modified location:d1-pref from c1 to g1
+INFO: 53: modified location:d1-pref from g1 to d2
+INFO: 53: hanging location:d1-pref deleted
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: _test
+.INP: # verify
+.INP: verify
+.INP: commit
diff --git a/test/testcases/edit b/test/testcases/edit
new file mode 100644
index 0000000..7deb115
--- /dev/null
+++ b/test/testcases/edit
@@ -0,0 +1,95 @@
+show Configuration editing
+op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+filter "sed '$agroup g1 p1 p2'"
+show
+filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+show
+filter "sed '$aclone c1 p2'"
+filter "sed 's/p2/g1/'" c1
+filter "sed '/clone/s/g1/p2/'" c1 g1
+filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+filter "sed '1,$d'" c1 g1
+filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+filter "sed '/cl1/s/p3/p2/'"
+filter "sed '/cl1/d'"
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+group g2 d1 d2
+filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+filter "sed 's/ or mem number:lte 0//'" loc-d1
+filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+modgroup g1 add d3
+modgroup g1 remove p1
+modgroup g1 add p1 after p2
+modgroup g1 remove p1
+modgroup g1 add p1 before p2
+modgroup g1 add p1
+modgroup g1 remove st
+modgroup g1 remove c1
+modgroup g1 remove nosuch
+modgroup g1 add c1
+modgroup g1 add nosuch
+filter "sed 's/^/# this is a comment\\n/'" loc-d1
+rsc_defaults $id="rsc_options" failure-timeout=10m
+filter "sed 's/2m/60s/'" op-options
+show op-options
+property stonith-enabled=true
+show cib-bootstrap-options
+filter 'sed "s/stonith-enabled=true//"'
+show cib-bootstrap-options
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+primitive d6 ocf:heartbeat:Dummy
+order o-d456 d4 d5 d6
+tag t-d45: d4 d5
+show type:order
+show related:d4
+show
+commit
+_test
+verify
+primitive a0 ocf:heartbeat:Dummy
+primitive a1 ocf:heartbeat:Dummy
+primitive a2 ocf:heartbeat:Dummy
+primitive a3 ocf:heartbeat:Dummy
+primitive a4 ocf:heartbeat:Dummy
+primitive a5 ocf:heartbeat:Dummy
+primitive a6 ocf:heartbeat:Dummy
+primitive a7 ocf:heartbeat:Dummy
+primitive a8 ocf:heartbeat:Dummy
+primitive a9 ocf:heartbeat:Dummy
+primitive aErr ocf:heartbeat:Dummy
+group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+commit
+cd ..
+cd configure
+filter "sed '/as/s/a9//'"
+filter "sed '/as/s/a1/a1 a9/'"
+commit
+cd ..
+cd configure
+filter "sed '/abs/s/a9//'"
+filter "sed '/abs/s/a8/a8 a9/'"
+show
+commit
+_test
+verify
+.
diff --git a/test/testcases/edit.excl b/test/testcases/edit.excl
new file mode 100644
index 0000000..3589a25
--- /dev/null
+++ b/test/testcases/edit.excl
@@ -0,0 +1 @@
+^\.EXT sed \-[re] ['][^']
diff --git a/test/testcases/edit.exp b/test/testcases/edit.exp
new file mode 100644
index 0000000..3d3bc0b
--- /dev/null
+++ b/test/testcases/edit.exp
@@ -0,0 +1,437 @@
+.TRY Configuration editing
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: node node1 attributes mem=16G
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+.INP: filter "sed '$agroup g1 p1 p2'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p3
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed '$aclone c1 p2'"
+.INP: filter "sed 's/p2/g1/'" c1
+.INP: filter "sed '/clone/s/g1/p2/'" c1 g1
+.INP: filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+.INP: filter "sed '1,$d'" c1 g1
+.INP: filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: filter "sed '/cl1/s/p3/p2/'"
+.INP: filter "sed '/cl1/d'"
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: group g2 d1 d2
+.INP: filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+ERROR: 29: Cannot create group:g1: Child primitive:d1 already in group:g2
+.INP: filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+.INP: filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: filter "sed 's/ or mem number:lte 0//'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: modgroup g1 add d3
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 after p2
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 before p2
+.INP: modgroup g1 add p1
+ERROR: 1: syntax in group: child p1 listed more than once in group g1 parsing 'group g1 p1 p2 d3 p1'
+.INP: modgroup g1 remove st
+ERROR: 42: configure.modgroup: st is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: st is not member of g1
+.INP: modgroup g1 remove c1
+ERROR: 43: configure.modgroup: c1 is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: c1 is not member of g1
+.INP: modgroup g1 remove nosuch
+ERROR: 44: configure.modgroup: nosuch is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: nosuch is not member of g1
+.INP: modgroup g1 add c1
+ERROR: 45: a group may contain only primitives; c1 is clone
+.INP: modgroup g1 add nosuch
+ERROR: 46: g1 refers to missing object nosuch
+.INP: filter "sed 's/^/# this is a comment\n/'" loc-d1
+.INP: rsc_defaults $id="rsc_options" failure-timeout=10m
+.INP: filter "sed 's/2m/60s/'" op-options
+.INP: show op-options
+op_defaults op-options: \
+ timeout=60s
+.INP: property stonith-enabled=true
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: filter 'sed "s/stonith-enabled=true//"'
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options:
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: primitive d6 ocf:heartbeat:Dummy
+.INP: order o-d456 d4 d5 d6
+.INP: tag t-d45: d4 d5
+.INP: show type:order
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+.INP: show related:d4
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+tag t-d45 d4 d5
+order o-d456 d4 d5 d6
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: _test
+.INP: verify
+.INP: primitive a0 ocf:heartbeat:Dummy
+.INP: primitive a1 ocf:heartbeat:Dummy
+.INP: primitive a2 ocf:heartbeat:Dummy
+.INP: primitive a3 ocf:heartbeat:Dummy
+.INP: primitive a4 ocf:heartbeat:Dummy
+.INP: primitive a5 ocf:heartbeat:Dummy
+.INP: primitive a6 ocf:heartbeat:Dummy
+.INP: primitive a7 ocf:heartbeat:Dummy
+.INP: primitive a8 ocf:heartbeat:Dummy
+.INP: primitive a9 ocf:heartbeat:Dummy
+.INP: primitive aErr ocf:heartbeat:Dummy
+.INP: group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/as/s/a9//'"
+.INP: filter "sed '/as/s/a1/a1 a9/'"
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/abs/s/a9//'"
+.INP: filter "sed '/abs/s/a8/a8 a9/'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 89: apparently there is nothing to commit
+INFO: 89: try changing something first
+.INP: _test
+.INP: verify
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 93: apparently there is nothing to commit
+INFO: 93: try changing something first
diff --git a/test/testcases/file b/test/testcases/file
new file mode 100644
index 0000000..5f215b7
--- /dev/null
+++ b/test/testcases/file
@@ -0,0 +1,14 @@
+configure save sample.txt
+%ext cat sample.txt
+configure erase nodes
+configure load replace sample.txt
+%ext sed -i 's/60s/2m/' sample.txt
+%ext sed -i '8a # comment' sample.txt
+session Load update
+configure
+delete m1 p1
+property cluster-recheck-interval="10m"
+load update sample.txt
+.
+configure show
+%ext rm sample.txt
diff --git a/test/testcases/file.exp b/test/testcases/file.exp
new file mode 100644
index 0000000..dce48de
--- /dev/null
+++ b/test/testcases/file.exp
@@ -0,0 +1,77 @@
+.TRY configure save sample.txt
+.EXT cat sample.txt
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+clone c1 p1 \
+ meta interleave=true
+clone m1 p2 \
+ meta promotable=true interleave=true
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.TRY configure erase nodes
+.TRY configure load replace sample.txt
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT sed -i 's/60s/2m/' sample.txt
+.EXT sed -i '8a # comment' sample.txt
+.TRY Load update
+.INP: configure
+.INP: delete m1 p1
+.INP: property cluster-recheck-interval="10m"
+.INP: load update sample.txt
+ERROR: 4: syntax: Unknown command near <op> parsing 'op stop timeout=20s interval=0s'
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.TRY configure show
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ cluster-recheck-interval=10m
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.EXT rm sample.txt
diff --git a/test/testcases/history b/test/testcases/history
new file mode 100644
index 0000000..383fca8
--- /dev/null
+++ b/test/testcases/history
@@ -0,0 +1,42 @@
+session History
+history
+source history-test.tar.bz2
+info
+events
+node 15sp1-1
+node 15sp1-2
+node .*
+exclude pcmk_peer_update
+exclude
+node 15sp1-2
+exclude clear
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+exclude clear
+peinputs
+peinputs v
+transitions
+refresh
+resource d1
+# reduce report span
+timeframe "2019-03-22 15:07:37"
+peinputs
+resource d1
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+transition log
+transition nograph
+transition -1 nograph
+transition save 0 _crmsh_regtest
+transition log 49
+transition tags 49
+# reset timeframe
+timeframe
+session save _crmsh_regtest
+session load _crmsh_regtest
+session
+session pack
+.
+session History 2
+history
+session load _crmsh_regtest
+exclude
+.
diff --git a/test/testcases/history.excl b/test/testcases/history.excl
new file mode 100644
index 0000000..01f788c
--- /dev/null
+++ b/test/testcases/history.excl
@@ -0,0 +1,3 @@
+^ptest.*:
+^\.EXT tar -C ['][^']+['] -cj -f ['][^']+['] _crmsh_regtest
+^Report saved in ['][^']+
diff --git a/test/testcases/history.exp b/test/testcases/history.exp
new file mode 100644
index 0000000..55cb2c8
--- /dev/null
+++ b/test/testcases/history.exp
@@ -0,0 +1,600 @@
+.TRY History
+.INP: history
+.INP: source history-test.tar.bz2
+.INP: info
+.EXT tar -tj < history-test.tar.bz2 2> /dev/null | head -1
+.EXT tar -xj < history-test.tar.bz2
+Source: history-test.tar.bz2
+Created on: Fri Mar 22 15:08:40 CST 2019
+By: report
+Period: 2019-03-19 01:09:49 - 2019-03-22 23:08:36
+Nodes: 15sp1-1 15sp1-2
+Groups: g1
+Clones:
+Resources: stonith-sbd d1 d2
+Transitions: ... 37* 38* 39* 40* 41 42* 43 44* 45 46 0 48 49* 11 12 13* 15* 16 18 19*
+.INP: events
+2019-03-22T10:56:18.986113+08:00 15sp1-2 mysql(mysql)[2185]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:18.586826+08:00 15sp1-1 mysql(mysql)[4459]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.028197+08:00 15sp1-2 mysql(mysql)[2224]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.082101+08:00 15sp1-2 mysql(mysql)[2259]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.026652+08:00 15sp1-1 pacemaker-schedulerd[1739]: notice: * Recover mysql ( 15sp1-2 -> 15sp1-1 )
+2019-03-22T10:56:19.292370+08:00 15sp1-1 mysql(mysql)[4498]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.646138+08:00 15sp1-1 mysql(mysql)[4533]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T11:02:21.651185+08:00 15sp1-1 pacemakerd[1742]: warning: pacemaker-controld[1749] terminated with signal 9 (core=0)
+2019-03-22T11:45:15.291388+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T11:46:15.982330+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T14:46:29.149904+08:00 15sp1-1 sshd[11637]: error: PAM: Authentication failure for root from 10.67.19.6
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:35:10.376892+08:00 15sp1-2 pacemaker-controld[1750]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:39:50.964158+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:40:41.791107+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=53)
+2019-03-22T10:41:15.144867+08:00 15sp1-2 pacemaker-controld[2965]: notice: Updating quorum status to true (call=31)
+2019-03-22T10:42:43.668990+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:57:27.930481+08:00 15sp1-1 pacemaker-controld[1740]: notice: Peer 15sp1-2 was terminated (reboot) by 15sp1-1 on behalf of pacemaker-controld.1740: OK
+2019-03-22T10:57:52.410569+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=175)
+2019-03-22T11:00:43.930597+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=34)
+2019-03-22T11:01:29.688725+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=62)
+2019-03-22T11:02:23.786295+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=85)
+2019-03-22T10:39:55.137238+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:39:55.137767+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:39:57.604345+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:2467ms queue-time:0ms
+2019-03-22T10:41:13.905506+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:41:13.913809+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:41:13.913941+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:13.914056+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T10:41:13.914284+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T10:41:15.074728+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1170ms queue-time:0ms
+2019-03-22T10:41:16.497053+08:00 15sp1-2 pacemaker-controld[2965]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-2
+2019-03-22T10:41:16.497127+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:16.497217+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:2ms queue-time:0ms
+2019-03-22T10:42:44.878768+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:42:44.880933+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:42:46.405487+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1524ms queue-time:0ms
+2019-03-22T10:43:08.620641+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:43:08.620831+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:43:08.621463+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:1ms queue-time:0ms
+2019-03-22T10:54:17.948621+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:54:17.948709+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:42
+2019-03-22T10:54:19.157468+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:42 exit-code:0 exec-time:1209ms queue-time:0ms
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.496863+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:start call_id:39
+2019-03-22T10:54:48.510603+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:start call_id:39 pid:2145 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:48.474653+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d2_start_0 on 15sp1-2
+2019-03-22T10:54:58.218867+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:stop call_id:40
+2019-03-22T10:54:58.234531+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:stop call_id:40 pid:2150 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.196862+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d2_stop_0 on 15sp1-2
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:00:42.659431+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T11:00:42.660180+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:58
+2019-03-22T11:00:42.660574+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:58 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T11:00:42.661106+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T11:00:42.660196+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:14
+2019-03-22T11:00:43.862608+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:14 exit-code:0 exec-time:1202ms queue-time:0ms
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.233648+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d2_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+2019-03-22T11:03:05.232910+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d2 action:start call_id:22
+2019-03-22T11:03:05.246921+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d2 action:start call_id:22 pid:1852 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T11:45:14.806899+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-1
+2019-03-22T11:45:14.805511+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:stonith-sbd action:start call_id:34
+2019-03-22T11:45:16.071026+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:stonith-sbd action:start call_id:34 exit-code:0 exec-time:1266ms queue-time:0ms
+2019-03-22T11:46:15.742947+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-2
+2019-03-22T11:46:15.743031+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:45
+2019-03-22T11:46:16.907002+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:45 exit-code:0 exec-time:1165ms queue-time:0ms
+.INP: node 15sp1-1
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: node .*
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude pcmk_peer_update
+.INP: exclude
+pcmk_peer_update
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude clear
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: exclude clear
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: peinputs v
+Date Start End Filename Client User Origin Tags
+==== ===== === ======== ====== ==== ====== ====
+2019-03-22 18:35:11 18:35:11 pe-input-3 crmd hacluster 15sp1-1
+2019-03-22 18:36:10 18:36:10 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:37:14 18:37:14 pe-input-5 crmd hacluster 15sp1-1
+2019-03-22 18:39:51 18:39:51 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:39:55 18:39:57 pe-input-5 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 18:39:57 pe-input-6 cibadmin root 15sp1-1
+2019-03-22 18:40:41 18:40:41 pe-input-7 cibadmin root 15sp1-1
+2019-03-22 18:41:13 18:41:15 pe-input-8 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 --:--:-- pe-input-7 crmd hacluster 15sp1-1
+2019-03-22 18:41:16 18:41:16 pe-input-8 crmd hacluster 15sp1-1 stonith-sbd
+2019-03-22 18:42:44 18:42:46 pe-input-9 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 18:43:08 pe-input-10 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 18:43:23 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 18:43:44 18:43:45 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 18:44:29 18:44:29 pe-input-13 cibadmin root 15sp1-1
+2019-03-22 18:44:36 18:44:36 pe-input-14 cibadmin root 15sp1-1
+2019-03-22 18:44:38 18:44:38 pe-input-15 cibadmin root 15sp1-1
+2019-03-22 18:44:59 18:45:00 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 18:45:14 18:45:14 pe-input-17 cibadmin root 15sp1-1
+2019-03-22 18:45:32 18:45:32 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 18:45:37 18:45:37 pe-input-19 cibadmin root 15sp1-1
+2019-03-22 18:48:50 18:48:50 pe-input-20 cibadmin root 15sp1-1
+2019-03-22 18:48:51 --:--:-- pe-input-21 cibadmin root 15sp1-1
+2019-03-22 18:49:48 18:49:48 pe-input-23 cibadmin root 15sp1-1
+2019-03-22 18:49:53 18:49:53 pe-input-24 cibadmin root 15sp1-1
+2019-03-22 18:51:19 18:51:19 pe-input-25 cibadmin root 15sp1-1
+2019-03-22 18:51:39 18:51:39 pe-input-26 cibadmin root 15sp1-1
+2019-03-22 18:51:53 18:51:53 pe-input-27 cibadmin root 15sp1-1
+2019-03-22 18:51:54 --:--:-- pe-input-28 cibadmin root 15sp1-1
+2019-03-22 18:52:06 18:52:06 pe-input-30 cibadmin root 15sp1-1
+2019-03-22 18:52:25 18:52:25 pe-input-31 cibadmin root 15sp1-1
+2019-03-22 18:53:09 18:53:09 pe-input-32 cibadmin root 15sp1-1
+2019-03-22 18:53:15 18:53:15 pe-input-33 cibadmin root 15sp1-1
+2019-03-22 18:53:15 --:--:-- pe-input-34 cibadmin root 15sp1-1
+2019-03-22 18:54:08 18:54:08 pe-input-36 cibadmin root 15sp1-1
+2019-03-22 18:54:17 18:54:19 pe-input-37 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 18:54:48 pe-input-38 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:54:58 18:54:58 pe-input-39 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:56:18 18:56:19 pe-input-40 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 18:56:19 pe-input-41 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-42 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 --:--:-- pe-input-43 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-44 cibadmin root 15sp1-1 error
+2019-03-22 18:56:42 18:56:42 pe-input-45 cibadmin root 15sp1-1
+2019-03-22 18:56:43 --:--:-- pe-input-46 cibadmin root 15sp1-1
+2019-03-22 18:56:55 18:57:27 pe-warn-0 cibadmin root 15sp1-1
+2019-03-22 18:57:52 18:57:52 pe-input-48 cibadmin root 15sp1-1
+2019-03-22 19:00:42 19:00:43 pe-input-49 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 19:01:31 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 19:02:24 19:02:24 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 19:03:05 19:03:05 pe-input-13 cibadmin root 15sp1-1 d1 d2
+2019-03-22 19:45:14 19:45:16 pe-input-15 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:45:16 19:45:16 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:15 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:16 pe-input-19 cibadmin root 15sp1-1 stonith-sbd
+.INP: transitions
+Time Name Node Tags
+2019-03-22 18:35:11 - 18:35:11: pe-input-3 15sp1-2
+2019-03-22 18:36:10 - 18:36:10: pe-input-4 15sp1-2
+2019-03-22 18:37:14 - 18:37:14: pe-input-5 15sp1-2
+2019-03-22 18:39:51 - 18:39:51: pe-input-4 15sp1-1
+2019-03-22 18:39:55 - 18:39:57: pe-input-5 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 - 18:39:57: pe-input-6 15sp1-1
+2019-03-22 18:40:41 - 18:40:41: pe-input-7 15sp1-1
+2019-03-22 18:41:13 - 18:41:15: pe-input-8 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 - --:--:--: pe-input-7 15sp1-2
+2019-03-22 18:41:16 - 18:41:16: pe-input-8 15sp1-2 stonith-sbd
+2019-03-22 18:42:44 - 18:42:46: pe-input-9 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 - 18:43:08: pe-input-10 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 - 18:43:23: pe-input-11 15sp1-1
+2019-03-22 18:43:44 - 18:43:45: pe-input-12 15sp1-1
+2019-03-22 18:44:29 - 18:44:29: pe-input-13 15sp1-1
+2019-03-22 18:44:36 - 18:44:36: pe-input-14 15sp1-1
+2019-03-22 18:44:38 - 18:44:38: pe-input-15 15sp1-1
+2019-03-22 18:44:59 - 18:45:00: pe-input-16 15sp1-1
+2019-03-22 18:45:14 - 18:45:14: pe-input-17 15sp1-1
+2019-03-22 18:45:32 - 18:45:32: pe-input-18 15sp1-1
+2019-03-22 18:45:37 - 18:45:37: pe-input-19 15sp1-1
+2019-03-22 18:48:50 - 18:48:50: pe-input-20 15sp1-1
+2019-03-22 18:48:51 - --:--:--: pe-input-21 15sp1-1
+2019-03-22 18:49:48 - 18:49:48: pe-input-23 15sp1-1
+2019-03-22 18:49:53 - 18:49:53: pe-input-24 15sp1-1
+2019-03-22 18:51:19 - 18:51:19: pe-input-25 15sp1-1
+2019-03-22 18:51:39 - 18:51:39: pe-input-26 15sp1-1
+2019-03-22 18:51:53 - 18:51:53: pe-input-27 15sp1-1
+2019-03-22 18:51:54 - --:--:--: pe-input-28 15sp1-1
+2019-03-22 18:52:06 - 18:52:06: pe-input-30 15sp1-1
+2019-03-22 18:52:25 - 18:52:25: pe-input-31 15sp1-1
+2019-03-22 18:53:09 - 18:53:09: pe-input-32 15sp1-1
+2019-03-22 18:53:15 - 18:53:15: pe-input-33 15sp1-1
+2019-03-22 18:53:15 - --:--:--: pe-input-34 15sp1-1
+2019-03-22 18:54:08 - 18:54:08: pe-input-36 15sp1-1
+2019-03-22 18:54:17 - 18:54:19: pe-input-37 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 - 18:54:48: pe-input-38 15sp1-1 d1 d2
+2019-03-22 18:54:58 - 18:54:58: pe-input-39 15sp1-1 d1 d2
+2019-03-22 18:56:18 - 18:56:19: pe-input-40 15sp1-1 error
+2019-03-22 18:56:19 - 18:56:19: pe-input-41 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-42 15sp1-1 error
+2019-03-22 18:56:19 - --:--:--: pe-input-43 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-44 15sp1-1 error
+2019-03-22 18:56:42 - 18:56:42: pe-input-45 15sp1-1
+2019-03-22 18:56:43 - --:--:--: pe-input-46 15sp1-1
+2019-03-22 18:56:55 - 18:57:27: pe-warn-0 15sp1-1
+2019-03-22 18:57:52 - 18:57:52: pe-input-48 15sp1-1
+2019-03-22 19:00:42 - 19:00:43: pe-input-49 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 - 19:01:31: pe-input-11 15sp1-2
+2019-03-22 19:02:24 - 19:02:24: pe-input-12 15sp1-2
+2019-03-22 19:03:05 - 19:03:05: pe-input-13 15sp1-2 d1 d2
+2019-03-22 19:45:14 - 19:45:16: pe-input-15 15sp1-2 stonith-sbd
+2019-03-22 19:45:16 - 19:45:16: pe-input-16 15sp1-2
+2019-03-22 19:46:15 - 19:46:15: pe-input-18 15sp1-2
+2019-03-22 19:46:15 - 19:46:16: pe-input-19 15sp1-2 stonith-sbd
+.INP: refresh
+Refreshing log data...
+55 transitions, 116 events.
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: # reduce report span
+.INP: timeframe "2019-03-22 15:07:37"
+WARNING: 20: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 20: "timeframe" is accepted as "limit"
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: transition log
+2019-03-22T11:46:15.797222+08:00 15sp1-2 sbd[2770]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:46:15.812786+08:00 15sp1-2 sbd[2774]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+.INP: transition nograph
+INFO: 25: running ptest with history-test/15sp1-2/pengine/pe-input-19.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-19 (19:46:15 - 19:46:16):
+ total 1 actions: 1 Complete
+.INP: transition -1 nograph
+INFO: 26: running ptest with history-test/15sp1-2/pengine/pe-input-18.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-18 (19:46:15 - 19:46:15):
+ total 12 actions: 7 Complete, 1 Skipped, 4 Incomplete
+.INP: transition save 0 _crmsh_regtest
+INFO: 27: transition history-test/15sp1-2/pengine/pe-input-19.bz2 saved to shadow _crmsh_regtest
+.INP: transition log 49
+2019-03-22T11:00:42.614804+08:00 15sp1-1 systemd[1]: Stopped target Timers.
+2019-03-22T11:00:42.615759+08:00 15sp1-1 systemd[1]: Stopped Discard unused blocks once a week.
+2019-03-22T11:00:42.615966+08:00 15sp1-1 systemd[1]: Stopped Scrub btrfs filesystem, verify block checksums.
+2019-03-22T11:00:42.616312+08:00 15sp1-1 systemd[1]: Stopped target Sound Card.
+2019-03-22T11:00:42.616521+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Temporary Directories.
+2019-03-22T11:00:42.616840+08:00 15sp1-1 systemd[1]: Stopped target Multi-User System.
+2019-03-22T11:00:42.617530+08:00 15sp1-1 pacemakerd[1733]: notice: Caught 'Terminated' signal
+2019-03-22T11:00:42.617672+08:00 15sp1-1 pacemakerd[1733]: notice: Shutting down Pacemaker
+2019-03-22T11:00:42.635974+08:00 15sp1-1 systemd[1]: Stopping Pacemaker High Availability Cluster Manager...
+2019-03-22T11:00:42.640402+08:00 15sp1-1 systemd[1]: Stopped target Login Prompts.
+2019-03-22T11:00:42.649788+08:00 15sp1-1 systemd[1]: Stopping Session 1 of user root.
+2019-03-22T11:00:42.656415+08:00 15sp1-1 systemd[1]: Stopping OpenSSH Daemon...
+2019-03-22T11:00:42.659094+08:00 15sp1-1 systemd[1]: Stopped Detect if the system suffers from bsc#1089761.
+2019-03-22T11:00:42.660023+08:00 15sp1-1 systemd[1]: Stopped Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.660434+08:00 15sp1-1 systemd[1]: Stopping Restore /run/initramfs on shutdown...
+2019-03-22T11:00:42.660712+08:00 15sp1-1 systemd[1]: Stopped Do daily mandb update.
+2019-03-22T11:00:42.660980+08:00 15sp1-1 systemd[1]: Stopped Check if mainboard battery is Ok.
+2019-03-22T11:00:42.661239+08:00 15sp1-1 systemd[1]: Stopped Early Kernel Boot Messages.
+2019-03-22T11:00:42.661471+08:00 15sp1-1 systemd[1]: Stopped Apply settings from /etc/sysconfig/keyboard.
+2019-03-22T11:00:42.661722+08:00 15sp1-1 systemd[1]: Closed LVM2 poll daemon socket.
+2019-03-22T11:00:42.661854+08:00 15sp1-1 systemd[1]: Stopped Backup of RPM database.
+2019-03-22T11:00:42.661990+08:00 15sp1-1 systemd[1]: Stopped Backup of /etc/sysconfig.
+2019-03-22T11:00:42.663466+08:00 15sp1-2 systemd[1]: Started Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.673313+08:00 15sp1-1 systemd[1766]: Stopped target Default.
+2019-03-22T11:00:42.673554+08:00 15sp1-1 systemd[1766]: Stopped target Basic System.
+2019-03-22T11:00:42.673738+08:00 15sp1-1 systemd[1766]: Stopped target Sockets.
+2019-03-22T11:00:42.673880+08:00 15sp1-1 systemd[1766]: Closed D-Bus User Message Bus Socket.
+2019-03-22T11:00:42.674004+08:00 15sp1-1 systemd[1766]: Stopped target Paths.
+2019-03-22T11:00:42.674122+08:00 15sp1-1 systemd[1766]: Reached target Shutdown.
+2019-03-22T11:00:42.674236+08:00 15sp1-1 systemd[1766]: Stopped target Timers.
+2019-03-22T11:00:42.674360+08:00 15sp1-1 systemd[1766]: Starting Exit the Session...
+2019-03-22T11:00:42.674478+08:00 15sp1-1 systemd[1]: Stopping User Manager for UID 0...
+2019-03-22T11:00:42.674594+08:00 15sp1-1 systemd[1]: Stopped Balance block groups on a btrfs filesystem.
+2019-03-22T11:00:42.674701+08:00 15sp1-1 systemd[1]: Stopping iSCSI UserSpace I/O driver...
+2019-03-22T11:00:42.674806+08:00 15sp1-1 systemd[1]: Stopping Getty on tty1...
+2019-03-22T11:00:42.674911+08:00 15sp1-1 systemd[1]: Stopping Command Scheduler...
+2019-03-22T11:00:42.675020+08:00 15sp1-1 systemd[1]: Stopped Daily rotation of log files.
+2019-03-22T11:00:42.675126+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Snapper Snapshots.
+2019-03-22T11:00:42.675231+08:00 15sp1-1 systemd[1]: Removed slice system-systemd\x2dhibernate\x2dresume.slice.
+2019-03-22T11:00:42.675345+08:00 15sp1-1 systemd[1]: Stopped iSCSI UserSpace I/O driver.
+2019-03-22T11:00:42.675452+08:00 15sp1-1 systemd[1]: Stopped OpenSSH Daemon.
+2019-03-22T11:00:42.675561+08:00 15sp1-1 systemd[1]: Stopped Session 1 of user root.
+2019-03-22T11:00:42.683003+08:00 15sp1-1 systemd-logind[819]: Session 1 logged out. Waiting for processes to exit.
+2019-03-22T11:00:42.683239+08:00 15sp1-1 systemd[1]: Stopped Getty on tty1.
+2019-03-22T11:00:42.683375+08:00 15sp1-1 systemd[1]: Stopped Restore /run/initramfs on shutdown.
+2019-03-22T11:00:42.683487+08:00 15sp1-1 systemd-logind[819]: Removed session 1.
+2019-03-22T11:00:42.683603+08:00 15sp1-1 systemd[1]: Starting Show Plymouth Reboot Screen...
+2019-03-22T11:00:42.683861+08:00 15sp1-1 systemd[1]: Removed slice system-getty.slice.
+2019-03-22T11:00:42.686592+08:00 15sp1-1 systemd[1]: Received SIGRTMIN+20 from PID 5230 (plymouthd).
+2019-03-22T11:00:42.687482+08:00 15sp1-2 dbus-daemon[768]: [system] Activating service name='org.opensuse.Snapper' requested by ':1.13' (uid=0 pid=1835 comm="/usr/lib/snapper/systemd-helper --timeline ") (using servicehelper)
+2019-03-22T11:00:42.687871+08:00 15sp1-1 cron[1730]: (CRON) INFO (Shutting down)
+2019-03-22T11:00:42.689646+08:00 15sp1-1 systemd[1]: Stopped Command Scheduler.
+2019-03-22T11:00:42.689784+08:00 15sp1-1 systemd[1]: Stopping Postfix Mail Transport Agent...
+2019-03-22T11:00:42.705412+08:00 15sp1-2 dbus-daemon[768]: [system] Successfully activated service 'org.opensuse.Snapper'
+2019-03-22T11:00:42.745173+08:00 15sp1-2 sbd[1847]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.760480+08:00 15sp1-2 sbd[1851]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.765095+08:00 15sp1-1 systemd[1]: Stopped Postfix Mail Transport Agent.
+2019-03-22T11:00:42.765239+08:00 15sp1-1 systemd[1]: Stopped target Host and Network Name Lookups.
+.INP: transition tags 49
+stonith-sbd
+.INP: # reset timeframe
+.INP: timeframe
+WARNING: 31: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 31: "timeframe" is accepted as "limit"
+.INP: session save _crmsh_regtest
+.INP: session load _crmsh_regtest
+.INP: session
+current session: _crmsh_regtest
+.INP: session pack
+.TRY History 2
+.INP: history
+.INP: session load _crmsh_regtest
+.INP: exclude
+corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
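The session save/load/pack sequence near the end of this transcript is the crmsh history session workflow end to end. A minimal sketch of the same flow outside the test harness, assuming a generated report is already loaded (the session name _crmsh_regtest is simply the one this test uses):

    # save the current history state under a name, reload it, then pack it for sharing
    crm history session save _crmsh_regtest
    crm history session load _crmsh_regtest
    crm history session pack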
diff --git a/test/testcases/history.post b/test/testcases/history.post
new file mode 100755
index 0000000..b5bb7fc
--- /dev/null
+++ b/test/testcases/history.post
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -r history-test
diff --git a/test/testcases/history.pre b/test/testcases/history.pre
new file mode 100755
index 0000000..4905f13
--- /dev/null
+++ b/test/testcases/history.pre
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -rf history-test
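Both hook scripts remove the _crmsh_regtest session and the history-test directory so each run starts clean; history.pre uses rm -rf, so a missing directory on a first run is not an error. A sketch of how such a pre/post pair brackets a case, assuming a harness that runs <case>.pre, the case itself, then <case>.post (the wrapper below is hypothetical, for illustration only):

    #!/bin/sh
    # hypothetical wrapper showing the pre/test/post convention
    ./history.pre      # reset state; -rf tolerates a missing directory
    # ... the 'history' input file is fed to crm here ...
    ./history.post     # delete the session and working directory again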
diff --git a/test/testcases/newfeatures b/test/testcases/newfeatures
new file mode 100644
index 0000000..5723625
--- /dev/null
+++ b/test/testcases/newfeatures
@@ -0,0 +1,44 @@
+session New features
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive p0 Dummy params $p0-state:state=1
+primitive p1 Dummy params \
+ rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 \
+ state=2
+primitive p2 Dummy params @p0-state
+property rule #uname eq node1 stonith-enabled=no
+tag tag1: p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+primitive node1 Dummy
+tag ones l1 p1
+alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ trap_node_states="non-trap" \
+ trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to "192.168.40.9"
+alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ select fencing nodes resources \
+ to 192.168.28.188
+show tag:ones and type:location
+show tag:ones and p1
+show
+_test
+verify
+commit
+.
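Two of the features exercised above are worth calling out: named nvpair sets that other primitives reference, and date-based rules inside params. A minimal standalone sketch, assuming a crm shell with ocf:heartbeat:Dummy installed (p0/p1/p2 as in the test input; the quoting is an assumption for running this from sh rather than inside the crm shell):

    # a named nvpair set ($p0-state:) that p2 reuses via @p0-state
    crm configure primitive p0 Dummy params '$p0-state:state=1'
    crm configure primitive p2 Dummy params @p0-state
    # a rule choosing the parameter value by role and date
    crm configure primitive p1 Dummy params \
        rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 \
        state=2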
diff --git a/test/testcases/newfeatures.exp b/test/testcases/newfeatures.exp
new file mode 100644
index 0000000..897f315
--- /dev/null
+++ b/test/testcases/newfeatures.exp
@@ -0,0 +1,81 @@
+.TRY New features
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive p0 Dummy params $p0-state:state=1
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p1 Dummy params rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2
+.INP: primitive p2 Dummy params @p0-state
+.INP: property rule #uname eq node1 stonith-enabled=no
+.INP: tag tag1: p0 p1 p2
+.INP: tag tag2 p0 p1 p2
+.INP: location l1 { p0 p1 p2 } inf: node1
+.INP: primitive node1 Dummy
+.INP: tag ones l1 p1
+.INP: alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" trap_node_states="non-trap" trap_resource_tasks="start,stop,monitor,promote,demote" to "192.168.40.9"
+.INP: alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" select attributes { master-prmStateful test1 } to 192.168.28.188
+.INP: alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh select fencing nodes resources to 192.168.28.188
+.INP: show tag:ones and type:location
+location l1 { p0 p1 p2 } inf: node1
+.INP: show tag:ones and p1
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+.INP: show
+node node1
+primitive node1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p0 Dummy \
+ params state=1 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ params @p0-state \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+tag ones l1 p1
+tag tag1 p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+property cib-bootstrap-options: \
+ rule #uname eq node1 \
+ stonith-enabled=no
+alert notify_10 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ select fencing nodes resources \
+ to 192.168.28.188
+alert notify_9 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false trap_node_states=non-trap trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to 192.168.40.9
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: commit
diff --git a/test/testcases/node b/test/testcases/node
new file mode 100644
index 0000000..f0a5fc1
--- /dev/null
+++ b/test/testcases/node
@@ -0,0 +1,14 @@
+node show
+node show node1
+%setenv showobj=node1
+configure primitive p5 Dummy
+configure group g0 p5
+resource maintenance g0
+resource maintenance p5
+-F node maintenance node1
+node ready node1
+node attribute node1 set a1 "1 2 3"
+node attribute node1 show a1
+node attribute node1 delete a1
+node clearstate node1
+
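The node case walks a single attribute through its full lifecycle, which the matching node.exp verifies in the CIB XML after each step. The same three steps from a plain shell, assuming a cluster node named node1 as in the fixture:

    crm node attribute node1 set a1 "1 2 3"   # creates nvpair nodes-node1-a1
    crm node attribute node1 show a1          # scope=nodes name=a1 value=1 2 3
    crm node attribute node1 delete a1        # removes the nvpair again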
diff --git a/test/testcases/node.exp b/test/testcases/node.exp
new file mode 100644
index 0000000..d91c33c
--- /dev/null
+++ b/test/testcases/node.exp
@@ -0,0 +1,204 @@
+.TRY node show
+node1: member
+.TRY node show node1
+node1: member
+.SETENV showobj=node1
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure group g0 p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance g0
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY -F node maintenance node1
+INFO: 'maintenance' attribute already exists in p5. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g0. Remove it? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node ready node1
+.EXT crm_attribute -t nodes -N 'node1' -n maintenance -v 'off'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 set a1 "1 2 3"
+.EXT crm_attribute -t nodes -N 'node1' -n 'a1' -v '1 2 3'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 show a1
+.EXT crm_attribute -G -t nodes -N 'node1' -n 'a1'
+scope=nodes name=a1 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 delete a1
+.EXT crm_attribute -D -t nodes -N 'node1' -n 'a1'
+Deleted nodes attribute: id=nodes-node1-a1 name=a1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node clearstate node1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
diff --git a/test/testcases/options b/test/testcases/options
new file mode 100644
index 0000000..44f331b
--- /dev/null
+++ b/test/testcases/options
@@ -0,0 +1,23 @@
+session Options
+options
+reset
+pager cat
+editor vi
+show
+check-frequency never
+check-mode nosuchever
+colorscheme normal,yellow,cyan,red,green,magenta
+colorscheme normal,yellow,cyan,red
+pager nosuchprogram
+skill-level operator
+skill-level joe
+skill-level expert
+output plain
+output misplain
+wait true
+wait off
+wait happy
+show
+save
+.
+options show
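The trailing options show runs as a separate invocation, so it only matches the expected output if save actually persisted the preferences. A minimal sketch of that persistence check, assuming crm keeps user preferences across processes:

    crm options check-frequency never   # change a setting
    crm options save                    # write preferences to disk
    crm options show                    # a fresh process still reports "never"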
diff --git a/test/testcases/options.exp b/test/testcases/options.exp
new file mode 100644
index 0000000..f13d308
--- /dev/null
+++ b/test/testcases/options.exp
@@ -0,0 +1,64 @@
+.TRY Options
+.INP: options
+.INP: reset
+.INP: pager cat
+.INP: editor vi
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "color"
+colorscheme "yellow,normal,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "always"
+check-mode "strict"
+wait "no"
+add-quotes "yes"
+manage-children "ask"
+.INP: check-frequency never
+.INP: check-mode nosuchever
+ERROR: nosuchever not valid (choose one from strict,relaxed)
+.INP: colorscheme normal,yellow,cyan,red,green,magenta
+.INP: colorscheme normal,yellow,cyan,red
+ERROR: bad color scheme: normal,yellow,cyan,red
+.INP: pager nosuchprogram
+ERROR: nosuchprogram does not exist or is not a program
+.INP: skill-level operator
+.INP: skill-level joe
+ERROR: joe not valid (choose one from operator,administrator,expert)
+.INP: skill-level expert
+.INP: output plain
+.INP: output misplain
+ERROR: misplain not valid (choose one from plain,color,uppercase)
+.INP: wait true
+.INP: wait off
+.INP: wait happy
+ERROR: happy not valid (yes or no are valid)
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
+.INP: save
+.TRY options show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
diff --git a/test/testcases/ra b/test/testcases/ra
new file mode 100644
index 0000000..bd44a3a
--- /dev/null
+++ b/test/testcases/ra
@@ -0,0 +1,7 @@
+session RA interface
+ra
+providers IPaddr
+providers Dummy
+info ocf:pacemaker:Dummy
+info stonith:external/ssh
+.
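These four commands cover the read-only RA metadata interface. The same queries as one-shot invocations, assuming the pacemaker Dummy agent and the external/ssh stonith plugin are installed:

    crm ra providers IPaddr            # expected here: heartbeat
    crm ra providers Dummy             # expected here: heartbeat pacemaker
    crm ra info ocf:pacemaker:Dummy    # parameters and advisory operation defaults
    crm ra info stonith:external/ssh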
diff --git a/test/testcases/ra.exp b/test/testcases/ra.exp
new file mode 100644
index 0000000..5d15734
--- /dev/null
+++ b/test/testcases/ra.exp
@@ -0,0 +1,150 @@
+.TRY RA interface
+.INP: ra
+.INP: providers IPaddr
+
+heartbeat
+.INP: providers Dummy
+heartbeat pacemaker
+.INP: info ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+Example stateless resource agent (ocf:pacemaker:Dummy)
+
+This is a dummy OCF resource agent. It does absolutely nothing except keep track
+of whether it is running or not, and can be configured so that actions fail or
+take a long time. Its purpose is primarily for testing, and to serve as a
+template for resource agent writers.
+
+Parameters (*: required, []: default):
+
+state (string, [state-file]): State file
+ Location to store the resource state in.
+
+passwd (string): Password
+ Fake password field
+
+fake (string, [dummy]):
+ Fake attribute that can be changed to cause an agent reload
+
+op_sleep (string, [0]): Operation sleep duration in seconds.
+ Number of seconds to sleep during operations. This can be used to test how
+ the cluster reacts to operation timeouts.
+
+fail_start_on (string): Report bogus start failure on specified host
+ Start, migrate_from, and reload-agent actions will return failure if running on
+ the host specified here, but the resource will run successfully anyway (future
+ monitor calls will find it running). This can be used to test on-fail=ignore.
+
+envfile (string): Environment dump file
+ If this is set, the environment will be dumped to this file for every call.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20s
+ stop timeout=20s
+ monitor timeout=20s interval=10s depth=0
+ reload timeout=20s
+ reload-agent timeout=20s
+ migrate_to timeout=20s
+ migrate_from timeout=20s
+.INP: info stonith:external/ssh
+.EXT crm_resource --show-metadata stonith:external/ssh
+.EXT stonithd metadata
+ssh STONITH device (stonith:external/ssh)
+
+ssh-based host reset
+Fine for testing, but not suitable for production!
+Only reboot action supported, no poweroff, and, surprisingly enough, no poweron.
+
+Parameters (*: required, []: default):
+
+hostlist* (string): Hostlist
+ The list of hosts that the STONITH device controls
+
+livedangerously (enum): Live Dangerously!!
+ Set to "yes" if you want to risk your system's integrity.
+ Of course, since this plugin isn't for production, using it
+ in production at all is a bad idea. On the other hand,
+ setting this parameter to yes makes it an even worse idea.
+ Viva la Vida Loca!
+
+pcmk_host_argument (string, [port]): Advanced use only: An alternate parameter to supply instead of 'port'
+ some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of none can be used to tell the cluster not to supply any additional parameters.
+
+pcmk_host_map (string): A mapping of host names to ports numbers for devices that do not support host names.
+ Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2
+
+pcmk_host_list (string): Eg. node1,node2,node3
+ A list of machines controlled by this device (Optional unless pcmk_host_list=static-list)
+
+pcmk_host_check (string, [dynamic-list]): How to determine which machines are controlled by the device.
+ Allowed values: dynamic-list (query the device via the 'list' command), static-list (check the pcmk_host_list attribute), status (query the device via the 'status' command), none (assume every device can fence every machine)
+
+pcmk_delay_max (time, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
+
+pcmk_delay_base (string, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value.This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value per target.
+
+pcmk_action_limit (integer, [1]): The maximum number of actions can be performed in parallel on this device
+ Cluster property concurrent-fencing=true needs to be configured first.Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.
+
+pcmk_reboot_action (string, [reboot]): Advanced use only: An alternate command to run instead of 'reboot'
+ Some devices do not support the standard commands or may provide additional ones.\nUse this to specify an alternate, device-specific, command that implements the 'reboot' action.
+
+pcmk_reboot_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
+
+pcmk_reboot_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up.
+
+pcmk_off_action (string, [off]): Advanced use only: An alternate command to run instead of 'off'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'off' action.
+
+pcmk_off_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'off' actions.
+
+pcmk_off_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'off' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up.
+
+pcmk_on_action (string, [on]): Advanced use only: An alternate command to run instead of 'on'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'on' action.
+
+pcmk_on_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'on' actions.
+
+pcmk_on_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'on' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up.
+
+pcmk_list_action (string, [list]): Advanced use only: An alternate command to run instead of 'list'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'list' action.
+
+pcmk_list_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'list' actions.
+
+pcmk_list_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'list' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up.
+
+pcmk_monitor_action (string, [monitor]): Advanced use only: An alternate command to run instead of 'monitor'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
+
+pcmk_monitor_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.\nUse this to specify an alternate, device-specific, timeout for 'monitor' actions.
+
+pcmk_monitor_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up.
+
+pcmk_status_action (string, [status]): Advanced use only: An alternate command to run instead of 'status'
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'status' action.
+
+pcmk_status_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'status' actions.
+
+pcmk_status_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'status' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20
+ stop timeout=15
+ status timeout=20
+ monitor timeout=20 interval=3600
diff --git a/test/testcases/ra.filter b/test/testcases/ra.filter
new file mode 100755
index 0000000..bc57a83
--- /dev/null
+++ b/test/testcases/ra.filter
@@ -0,0 +1,17 @@
+#!/usr/bin/awk -f
+# reduce the providers list to heartbeat and pacemaker
+# (prevents other providers creeping in)
+function reduce(a) {
+ a["heartbeat"]=1; a["pacemaker"]=1;
+ s="";
+ for( i=1; i<=NF; i++ )
+ if( $i in a )
+ s=s" "$i;
+ return substr(s,2);
+}
+n==1 { n=0; print reduce(a); next; }
+/providers IPaddr/ { n=1; }
+/providers Dummy/ { n=1; }
+/^ssh STONITH/ { sub(" external",""); }
+/^state \(string, \[(.*)\]\):/ { gsub(/\[.*\]/, "[state-file]") }
+{ print }
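ra.filter keeps the comparison with ra.exp stable across machines: after a "providers IPaddr" or "providers Dummy" trigger line it reduces the next line to just the heartbeat and pacemaker providers, renames the ssh stonith header, and pins the Dummy state default to [state-file]. A quick way to see the reduction in isolation ("myvendor" below is a made-up provider):

    printf '.INP: providers Dummy\nheartbeat pacemaker myvendor\n' \
        | awk -f test/testcases/ra.filter
    # .INP: providers Dummy
    # heartbeat pacemaker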
diff --git a/test/testcases/resource b/test/testcases/resource
new file mode 100644
index 0000000..8fad9b6
--- /dev/null
+++ b/test/testcases/resource
@@ -0,0 +1,84 @@
+resource status p0
+%setenv showobj=p3
+resource start p3
+resource stop p3
+%setenv showobj=c1
+resource manage c1
+resource unmanage c1
+%setenv showobj=p2
+resource maintenance p2 on
+resource maintenance p2 off
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1 force
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=p0
+resource param p0 set a0 "1 2 3"
+resource param p0 show a0
+resource param p0 delete a0
+resource meta p0 set m0 123
+resource meta p0 show m0
+resource meta p0 delete m0
+resource trace p0 probe
+resource trace p0 start
+resource trace p0 stop
+resource untrace p0 probe
+resource untrace p0 start
+resource untrace p0 stop
+configure group g p0 p3
+options manage-children never
+resource start g
+resource start p0
+resource stop g
+configure clone cg g
+options manage-children always
+resource start g
+resource stop g
+resource start cg
+resource stop p0
+resource start cg
+resource stop cg
+resource stop p3
+%setenv showobj=
+configure rename p3 p4
+configure primitive p3 Dummy
+resource stop p3
+resource start p3
+resource cleanup
+resource cleanup p3
+resource cleanup p3 node1
+resource cleanup force
+resource cleanup p3 force
+resource cleanup p3 node1 force
+resource refresh
+resource refresh p3
+resource refresh p3 node1
+resource refresh force
+resource refresh p3 force
+resource refresh p3 node1 force
+resource stop p3
+configure rm cg
+configure ms msg g
+resource scores
+%setenv showobj=
+configure primitive p5 Dummy
+configure group g1 p5
+resource manage p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=p5
+-F resource unmanage p5
+%setenv showobj=p5
+-F resource maintenance g1
+resource start p5
+%setenv showobj=g1
+-F resource manage g1
+resource start p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=g1
+-F resource maintenance g1
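The trace/untrace block in this input toggles per-operation trace_ra attributes, which the matching resource.exp verifies in the generated XML. The core sequence on its own, assuming a primitive p0 as configured in this fixture:

    crm resource trace p0 probe     # traces the interval=0 monitor (the probe)
    crm resource trace p0 start     # takes effect once p0 restarts
    crm resource untrace p0 start   # removes the trace_ra nvpair again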
diff --git a/test/testcases/resource.exp b/test/testcases/resource.exp
new file mode 100644
index 0000000..c03aae7
--- /dev/null
+++ b/test/testcases/resource.exp
@@ -0,0 +1,1450 @@
+.TRY resource status p0
+.EXT crm_resource --locate --resource 'p0'
+resource p0 is NOT running
+.SETENV showobj=p3
+.TRY resource start p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=c1
+.TRY resource manage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource unmanage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p2
+.TRY resource maintenance p2 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p2 off
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="false"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1'
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1 force
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1' --force
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=p0
+.TRY resource param p0 set a0 "1 2 3"
+.EXT crm_resource --resource 'p0' --set-parameter 'a0' --parameter-value '1 2 3'
+Set 'p0' option: id=p0-instance_attributes-a0 set=p0-instance_attributes name=a0 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 show a0
+.EXT crm_resource --resource 'p0' --get-parameter 'a0'
+1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 delete a0
+.EXT crm_resource --resource 'p0' --delete-parameter 'a0'
+Deleted 'p0' option: id=p0-instance_attributes-a0 name=a0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 set m0 123
+.EXT crm_resource --meta --resource 'p0' --set-parameter 'm0' --parameter-value '123'
+Set 'p0' option: id=p0-meta_attributes-m0 set=p0-meta_attributes name=m0 value=123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 show m0
+.EXT crm_resource --meta --resource 'p0' --get-parameter 'm0'
+123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 delete m0
+.EXT crm_resource --meta --resource 'p0' --delete-parameter 'm0'
+Deleted 'p0' option: id=p0-meta_attributes-m0 name=m0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 probe
+INFO: Trace for p0:monitor is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace non-monitor operations
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 start
+INFO: Trace for p0:start is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the start operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 stop
+INFO: Trace for p0:stop is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the stop operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 probe
+INFO: Stop tracing p0 for operation monitor
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 start
+INFO: Stop tracing p0 for operation start
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 stop
+INFO: Stop tracing p0 for operation stop
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure group g p0 p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children never
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure clone cg g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children always
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY configure rename p3 p4
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure primitive p3 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY resource stop p3
+.TRY resource start p3
+.TRY resource cleanup
+.EXT crm_resource --cleanup
+.TRY resource cleanup p3
+.EXT crm_resource --cleanup --resource p3
+.TRY resource cleanup p3 node1
+.EXT crm_resource --cleanup --resource p3 --node node1
+.TRY resource cleanup force
+.EXT crm_resource --cleanup --force
+.TRY resource cleanup p3 force
+.EXT crm_resource --cleanup --resource p3 --force
+.TRY resource cleanup p3 node1 force
+.EXT crm_resource --cleanup --resource p3 --node node1 --force
+.TRY resource refresh
+.EXT crm_resource --refresh
+.TRY resource refresh p3
+.EXT crm_resource --refresh --resource p3
+.TRY resource refresh p3 node1
+.EXT crm_resource --refresh --resource p3 --node node1
+.TRY resource refresh force
+.EXT crm_resource --refresh --force
+.TRY resource refresh p3 force
+.EXT crm_resource --refresh --resource p3 --force
+.TRY resource refresh p3 node1 force
+.EXT crm_resource --refresh --resource p3 --node node1 --force
+.TRY resource stop p3
+.TRY configure rm cg
+WARNING: This command 'rm' is deprecated, please use 'delete'
+INFO: "rm" is accepted as "delete"
+.TRY configure ms msg g
+WARNING: "ms" is deprecated. Please use "clone msg g meta promotable=true"
+.TRY resource scores
+.EXT crm_simulate -sUL
+2 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Node node1: UNCLEAN (offline)
+
+ st (stonith:null): Stopped
+ Stopped: [ node1 ]
+ Stopped: [ node1 ]
+ p3 (ocf::heartbeat:Dummy): Stopped ( disabled )
+ Stopped: [ node1 ]
+
+Original: node1 capacity:
+pcmk__primitive_assign: st allocation score on node1: 0
+pcmk__clone_assign: c1 allocation score on node1: 0
+pcmk__clone_assign: p1:0 allocation score on node1: 0
+pcmk__primitive_assign: p1:0 allocation score on node1: -INFINITY
+pcmk__clone_assign: m1 allocation score on node1: 0
+pcmk__clone_assign: p2:0 allocation score on node1: 0
+pcmk__primitive_assign: p2:0 allocation score on node1: -INFINITY
+p2:0 promotion score on none: 0
+pcmk__primitive_assign: p3 allocation score on node1: -INFINITY
+pcmk__clone_assign: msg allocation score on node1: 0
+pcmk__clone_assign: g:0 allocation score on node1: 0
+pcmk__clone_assign: p0:0 allocation score on node1: 0
+pcmk__clone_assign: p4:0 allocation score on node1: 0
+pcmk__group_assign: g:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p4:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p4:0 allocation score on node1: -INFINITY
+g:0 promotion score on none: 0
+Remaining: node1 capacity:
+
+.SETENV showobj=
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure group g1 p5
+.TRY resource manage p5
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes-0">
+ <nvpair id="p5-meta_attributes-0-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource unmanage p5
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance g1
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource manage g1
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ <nvpair id="p5-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource maintenance g1
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
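Commentary: the tail of resource.exp above exercises the mutual exclusion between the
maintenance and is-managed meta attributes — setting one prompts for removal of the
other, and -F force-confirms the prompt. A minimal interactive sketch of the same
sequence, assuming a live cluster containing the p5 primitive and g1 group from this
fixture (these are the commands behind the .TRY lines, not a new API):

    # setting maintenance offers to drop a conflicting is-managed attribute
    crm -F resource maintenance p5 on
    # unmanage resolves the reverse conflict the same way
    crm -F resource unmanage p5
    # the same logic applies to container resources such as groups
    crm -F resource maintenance g1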
diff --git a/test/testcases/rset b/test/testcases/rset
new file mode 100644
index 0000000..798e392
--- /dev/null
+++ b/test/testcases/rset
@@ -0,0 +1,21 @@
+show Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+show o1 c1 c2 c3
+delete d4
+show o1 c1 c2 c3
+_test
+verify
+.
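Commentary: this testcase relies on crmsh rewriting set-based constraints when a member
resource is deleted; rset.exp below shows o1, c1, c2 and c3 shrinking as d2 and then d4
go away. A rough single-shot equivalent, assuming the same configuration is loaded:

    # deleting a member updates every constraint that references it
    crm configure delete d2
    crm configure show o1 c1 c2 c3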
diff --git a/test/testcases/rset-xml b/test/testcases/rset-xml
new file mode 100644
index 0000000..842d4df
--- /dev/null
+++ b/test/testcases/rset-xml
@@ -0,0 +1,19 @@
+showxml Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+delete d4
+_test
+verify
+.
diff --git a/test/testcases/rset-xml.exp b/test/testcases/rset-xml.exp
new file mode 100644
index 0000000..51c431a
--- /dev/null
+++ b/test/testcases/rset-xml.exp
@@ -0,0 +1,53 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" timeout="20" interval="3600" id="st-monitor-3600"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d3" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d5-stop-0s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_order id="o1" kind="Serialize" first="d1" then="d3"/>
+ <rsc_colocation id="c1" score="INFINITY">
+ <resource_set sequential="false" id="c1-1">
+ <resource_ref id="d1"/>
+ <resource_ref id="d3"/>
+ </resource_set>
+ </rsc_colocation>
+ <rsc_colocation id="c2" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ <rsc_colocation id="c3" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ </constraints>
+ </configuration>
+</cib>
diff --git a/test/testcases/rset.exp b/test/testcases/rset.exp
new file mode 100644
index 0000000..79b03f4
--- /dev/null
+++ b/test/testcases/rset.exp
@@ -0,0 +1,66 @@
+.TRY Resource sets
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: primitive st stonith:ssh params hostlist='node1' op start timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: order o1 Serialize: d1 d2 ( d3 d4 )
+.INP: colocation c1 inf: d4 ( d1 d2 d3 )
+.INP: colocation c2 inf: d1 d2 d3 d4
+.INP: colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+.INP: delete d2
+INFO: 16: constraint order:o1 updated
+INFO: 16: constraint colocation:c1 updated
+INFO: 16: constraint colocation:c2 updated
+INFO: 16: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: d4 ( d1 d3 )
+colocation c2 inf: d1 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 )
+order o1 Serialize: d1 ( d3 d4 )
+.INP: delete d4
+INFO: 18: constraint order:o1 updated
+INFO: 18: constraint colocation:c1 updated
+INFO: 18: constraint colocation:c2 updated
+INFO: 18: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ op start timeout=60s interval=0s \
+ op monitor timeout=20 interval=3600 \
+ op stop timeout=15 interval=0s
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: commit
diff --git a/test/testcases/scripts b/test/testcases/scripts
new file mode 100644
index 0000000..b89d75d
--- /dev/null
+++ b/test/testcases/scripts
@@ -0,0 +1,14 @@
+session Cluster scripts
+script
+list
+list all
+list names
+list names all
+list all names
+list bogus
+show mailto
+verify mailto id=foo email=test@example.com subject=hello
+run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+json '["show", "mailto"]'
+json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+.
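Commentary: the session above drives the script subcommand end to end, and scripts.exp
below records the expected listings, verification output and dry run. The same checks
can be made from the shell, assuming the bundled mailto script is available:

    # validate parameters without touching the cluster
    crm script verify mailto id=foo email=test@example.com subject=hello
    # show what a run would do without executing it
    crm script run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true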
diff --git a/test/testcases/scripts.exp b/test/testcases/scripts.exp
new file mode 100644
index 0000000..ca086c9
--- /dev/null
+++ b/test/testcases/scripts.exp
@@ -0,0 +1,305 @@
+.TRY Cluster scripts
+.INP: script
+.INP: list
+.EXT crm_resource --show-metadata ocf:heartbeat:apache
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr2
+.EXT crm_resource --show-metadata ocf:heartbeat:Filesystem
+.EXT crm_resource --show-metadata ocf:heartbeat:mysql
+.EXT crm_resource --show-metadata systemd:cryptctl-server
+.EXT crm_resource --show-metadata ocf:heartbeat:db2
+.EXT crm_resource --show-metadata ocf:heartbeat:exportfs
+.EXT crm_resource --show-metadata systemd:haproxy
+.EXT crm_resource --show-metadata ocf:heartbeat:LVM
+.EXT crm_resource --show-metadata ocf:heartbeat:MailTo
+.EXT crm_resource --show-metadata ocf:heartbeat:nginx
+.EXT crm_resource --show-metadata ocf:heartbeat:Raid1
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list all
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Script:
+
+check-uptime Check uptime of nodes
+gfs2-base GFS2 File System Base (Cloned)
+lvm Controls the availability of an LVM Volume Group
+raid1 Manages Linux software RAID (MD) devices on shared storage
+sapdb SAP Database Instance
+sapinstance SAP Instance
+sbd-device Create SBD Device
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list names
+apache
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+haproxy
+health
+libvirt
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sbd
+virtual-ip
+vmware
+.INP: list names all
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list all names
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list bogus
+ERROR: 7: script.list: Unexpected argument 'bogus': expected [all|names]
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("Unexpected argument '%s': expected [all|names]" % (arg))
+ raise ValueError(msg)
+ValueError: Unexpected argument 'bogus': expected [all|names]
+.INP: show mailto
+mailto (Basic)
+E-Mail
+
+Notifies recipient by e-mail in the event of a resource takeover.
+
+1. Notifies recipients by email in the event of resource takeover
+
+ id (required) (unique)
+ Identifier for the cluster resource
+ email (required)
+ Email address
+ subject
+ Subject
+
+
+.INP: verify mailto id=foo email=test@example.com subject=hello
+1. Ensure mail package is installed
+
+ mailx
+
+2. Configure cluster resources
+
+ primitive foo ocf:heartbeat:MailTo
+ email="test@example.com"
+ subject="hello"
+ op start timeout="10"
+ op stop timeout="10"
+ op monitor interval="10" timeout="10"
+
+ clone c-foo foo
+
+.INP: run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+INFO: 10: E-Mail
+INFO: 10: Nodes: node1
+** all - #!/usr/bin/env python3
+import crm_script
+import crm_init
+
+crm_init.install_packages(['mailx'])
+crm_script.exit_ok(True)
+
+INFO: 10: Ensure mail package is installed
+** localhost - temporary file <<END
+primitive foo ocf:heartbeat:MailTo email="test@example.com" subject="hello" op start timeout="10" op stop timeout="10" op monitor interval="10" timeout="10"
+clone c-foo foo
+
+END
+
+** localhost - crm --wait --no configure load update <<temporary file>>
+INFO: 10: Configure cluster resources
+.INP: json '["show", "mailto"]'
+{"category": "basic", "longdesc": "Notifies recipient by e-mail in the event of a resource takeover.", "name": "mailto", "shortdesc": "E-Mail", "steps": [{"longdesc": " This is a resource agent for MailTo. It sends email to a sysadmin\nwhenever a takeover occurs.", "parameters": [{"advanced": false, "longdesc": "", "name": "id", "required": true, "shortdesc": "Identifier for the cluster resource", "type": "resource", "unique": true}, {"advanced": false, "example": "", "longdesc": " The email address of sysadmin.", "name": "email", "required": true, "shortdesc": "Email address", "type": "email", "unique": false}, {"advanced": false, "example": "Resource Group", "longdesc": " The subject of the email.", "name": "subject", "required": false, "shortdesc": "Subject", "type": "string", "unique": false}], "required": true, "shortdesc": "Notifies recipients by email in the event of resource takeover"}]}
+.INP: json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+{"longdesc": "", "name": "install", "nodes": "", "shortdesc": "Ensure mail package is installed", "text": "mailx"}
+{"longdesc": "", "name": "cib", "nodes": "", "shortdesc": "Configure cluster resources", "text": "primitive foo ocf:heartbeat:MailTo\n\temail=\"test@example.com\"\n\tsubject=\"hello\"\n\top start timeout=\"10\"\n\top stop timeout=\"10\"\n\top monitor interval=\"10\" timeout=\"10\"\n\nclone c-foo foo"}
diff --git a/test/testcases/scripts.filter b/test/testcases/scripts.filter
new file mode 100755
index 0000000..05e098a
--- /dev/null
+++ b/test/testcases/scripts.filter
@@ -0,0 +1,4 @@
+#!/usr/bin/awk -f
+# replace the temporary file path in "crm --wait --no configure load update <file>"
+# lines with the placeholder <<temporary file>>, so the output is reproducible
+{ print }
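Commentary: like the other *.filter helpers in this directory, this script is applied to
raw test output before it is compared against scripts.exp. A manual run might look like
this (file names are hypothetical):

    awk -f test/testcases/scripts.filter <raw.out >filtered.out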
diff --git a/test/testcases/shadow b/test/testcases/shadow
new file mode 100644
index 0000000..3bfd389
--- /dev/null
+++ b/test/testcases/shadow
@@ -0,0 +1,10 @@
+filesession Shadow CIB management
+cib
+new regtest force
+reset regtest
+use regtest
+commit regtest
+delete regtest
+use
+delete regtest
+.
diff --git a/test/testcases/shadow.exp b/test/testcases/shadow.exp
new file mode 100644
index 0000000..f5ec084
--- /dev/null
+++ b/test/testcases/shadow.exp
@@ -0,0 +1,24 @@
+.TRY Shadow CIB management
+.INP: cib
+.INP: new regtest force
+.EXT >/dev/null </dev/null crm_shadow -b -c 'regtest' --force
+INFO: 2: cib.new: regtest shadow CIB created
+.INP: reset regtest
+.EXT >/dev/null </dev/null crm_shadow -b -r 'regtest'
+INFO: 3: cib.reset: copied live CIB to regtest
+.INP: use regtest
+.INP: commit regtest
+.EXT >/dev/null </dev/null crm_shadow -b -C 'regtest' --force
+INFO: 5: cib.commit: committed 'regtest' shadow CIB to the cluster
+.INP: delete regtest
+ERROR: 6: cib.delete: regtest shadow CIB is in use
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s shadow CIB is in use" % name)
+ raise ValueError(msg)
+ValueError: regtest shadow CIB is in use
+.INP: use
+.INP: delete regtest
+.EXT >/dev/null </dev/null crm_shadow -b -D 'regtest' --force
+INFO: 8: cib.delete: regtest shadow CIB deleted
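Commentary: each cib subcommand in the session maps onto a crm_shadow invocation, as the
.EXT lines above show. A sketch of the same lifecycle as one-shot commands, with the
recorded crm_shadow calls alongside:

    crm cib new regtest force    # crm_shadow -b -c 'regtest' --force
    crm cib reset regtest        # crm_shadow -b -r 'regtest'
    crm cib commit regtest       # crm_shadow -b -C 'regtest' --force
    crm cib delete regtest       # crm_shadow -b -D 'regtest' --force; refused while
                                 # the shadow CIB is still in use, as the traceback shows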
diff --git a/test/testcases/xmlonly.sh b/test/testcases/xmlonly.sh
new file mode 100755
index 0000000..15e6427
--- /dev/null
+++ b/test/testcases/xmlonly.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# extract the XML CIB from regression test output
+#
+sed -n '/^<?xml/,/^<\/cib>/p'
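Commentary: the sed range keeps everything from the XML declaration through the closing
</cib> tag, so a full session transcript can be reduced to just the CIB dump. For
example (file names are hypothetical):

    sh test/testcases/xmlonly.sh <session.out >cib.xml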