Diffstat (limited to 'test/unittests')
-rw-r--r--  test/unittests/__init__.py | 64
-rw-r--r--  test/unittests/bug-862577_corosync.conf | 51
-rw-r--r--  test/unittests/corosync.conf.1 | 81
-rw-r--r--  test/unittests/corosync.conf.2 | 58
-rw-r--r--  test/unittests/corosync.conf.3 | 68
-rw-r--r--  test/unittests/pacemaker.log | 923
-rw-r--r--  test/unittests/pacemaker.log.2 | 3
-rw-r--r--  test/unittests/pacemaker_unicode.log | 30
-rw-r--r--  test/unittests/schemas/acls-1.1.rng | 66
-rw-r--r--  test/unittests/schemas/acls-1.2.rng | 66
-rw-r--r--  test/unittests/schemas/constraints-1.0.rng | 180
-rw-r--r--  test/unittests/schemas/constraints-1.1.rng | 246
-rw-r--r--  test/unittests/schemas/constraints-1.2.rng | 219
-rw-r--r--  test/unittests/schemas/fencing.rng | 29
-rw-r--r--  test/unittests/schemas/nvset.rng | 35
-rw-r--r--  test/unittests/schemas/pacemaker-1.0.rng | 121
-rw-r--r--  test/unittests/schemas/pacemaker-1.1.rng | 161
-rw-r--r--  test/unittests/schemas/pacemaker-1.2.rng | 146
-rw-r--r--  test/unittests/schemas/resources-1.0.rng | 177
-rw-r--r--  test/unittests/schemas/resources-1.1.rng | 225
-rw-r--r--  test/unittests/schemas/resources-1.2.rng | 225
-rw-r--r--  test/unittests/schemas/rule.rng | 137
-rw-r--r--  test/unittests/schemas/score.rng | 18
-rw-r--r--  test/unittests/schemas/versions.rng | 24
-rw-r--r--  test/unittests/scripts/inc1/main.yml | 22
-rw-r--r--  test/unittests/scripts/inc2/main.yml | 26
-rw-r--r--  test/unittests/scripts/legacy/main.yml | 52
-rw-r--r--  test/unittests/scripts/templates/apache.xml | 36
-rw-r--r--  test/unittests/scripts/templates/virtual-ip.xml | 62
-rw-r--r--  test/unittests/scripts/unified/main.yml | 26
-rw-r--r--  test/unittests/scripts/v2/main.yml | 46
-rw-r--r--  test/unittests/scripts/vip/main.yml | 28
-rw-r--r--  test/unittests/scripts/vipinc/main.yml | 14
-rw-r--r--  test/unittests/scripts/workflows/10-webserver.xml | 50
-rw-r--r--  test/unittests/test.conf | 12
-rw-r--r--  test/unittests/test_bootstrap.py | 1905
-rw-r--r--  test/unittests/test_bugs.py | 893
-rw-r--r--  test/unittests/test_cib.py | 32
-rw-r--r--  test/unittests/test_cliformat.py | 324
-rw-r--r--  test/unittests/test_corosync.py | 488
-rw-r--r--  test/unittests/test_crashtest_check.py | 790
-rw-r--r--  test/unittests/test_crashtest_main.py | 215
-rw-r--r--  test/unittests/test_crashtest_task.py | 777
-rw-r--r--  test/unittests/test_crashtest_utils.py | 540
-rw-r--r--  test/unittests/test_gv.py | 36
-rw-r--r--  test/unittests/test_handles.py | 166
-rw-r--r--  test/unittests/test_lock.py | 271
-rw-r--r--  test/unittests/test_objset.py | 40
-rw-r--r--  test/unittests/test_ocfs2.py | 465
-rw-r--r--  test/unittests/test_parallax.py | 104
-rw-r--r--  test/unittests/test_parse.py | 749
-rw-r--r--  test/unittests/test_prun.py | 157
-rw-r--r--  test/unittests/test_qdevice.py | 1031
-rw-r--r--  test/unittests/test_ratrace.py | 131
-rw-r--r--  test/unittests/test_report_collect.py | 588
-rw-r--r--  test/unittests/test_report_core.py | 551
-rw-r--r--  test/unittests/test_report_utils.py | 862
-rw-r--r--  test/unittests/test_sbd.py | 894
-rw-r--r--  test/unittests/test_scripts.py | 914
-rw-r--r--  test/unittests/test_service_manager.py | 84
-rw-r--r--  test/unittests/test_sh.py | 189
-rw-r--r--  test/unittests/test_time.py | 24
-rw-r--r--  test/unittests/test_ui_cluster.py | 173
-rw-r--r--  test/unittests/test_upgradeuitl.py | 54
-rw-r--r--  test/unittests/test_utils.py | 1514
-rw-r--r--  test/unittests/test_watchdog.py | 311
-rw-r--r--  test/unittests/test_xmlutil.py | 61
67 files changed, 19060 insertions, 0 deletions
diff --git a/test/unittests/__init__.py b/test/unittests/__init__.py
new file mode 100644
index 0000000..18f2638
--- /dev/null
+++ b/test/unittests/__init__.py
@@ -0,0 +1,64 @@
+from __future__ import unicode_literals
+import os
+import sys
+
+try:
+    import crmsh
+except ImportError as e:
+    pass
+
+from crmsh import config
+from crmsh import options
+config.core.debug = True
+options.regression_tests = True
+_here = os.path.dirname(__file__)
+config.path.sharedir = os.path.join(_here, "../../doc")
+config.path.crm_dtd_dir = os.path.join(_here, "schemas")
+
+os.environ["CIB_file"] = "test"
+
+
+# install a basic CIB
+from crmsh import cibconfig
+
+_CIB = """
+<cib epoch="0" num_updates="0" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Mon Mar 3 23:58:36 2014" update-origin="ha-one" update-client="crmd" update-user="hacluster" crm_feature_set="3.0.9" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ <nvpair name="no-quorum-policy" value="ignore" id="cib-bootstrap-options-no-quorum-policy"/>
+ <nvpair name="dc-version" value="1.1.11+git20140221.0b7d85a-115.1-1.1.11+git20140221.0b7d85a" id="cib-bootstrap-options-dc-version"/>
+ <nvpair name="cluster-infrastructure" value="corosync" id="cib-bootstrap-options-cluster-infrastructure"/>
+ <nvpair name="symmetric-cluster" value="true" id="cib-bootstrap-options-symmetric-cluster"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="ha-one" uname="ha-one"/>
+ <node id="ha-two" uname="ha-two"/>
+ <node id="ha-three" uname="ha-three"/>
+ </nodes>
+ <resources>
+ </resources>
+ <constraints>
+ </constraints>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <nvpair name="migration-threshold" value="0" id="rsc-options-migration-threshold"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="op-options">
+ <nvpair name="timeout" value="200" id="op-options-timeout"/>
+ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ </status>
+</cib>
+"""
+
+cibconfig.cib_factory.initialize(cib=_CIB)
+
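This __init__.py runs once when the test package is imported: it puts crmsh into regression-test mode, points the share and schema paths at the fixture tree, and pre-loads the three-node CIB above into cibconfig.cib_factory. A minimal sketch of a test module relying on that bootstrap (the module name and assertions are illustrative only, not part of this commit):

# Hypothetical companion test module (illustrative only). A test runner
# importing the test/unittests package executes the bootstrap above first,
# so the flags it sets are already in place.
import unittest

from crmsh import config, options


class TestFixtureBootstrap(unittest.TestCase):
    def test_regression_mode_enabled(self):
        # Both values are assigned in test/unittests/__init__.py.
        self.assertTrue(config.core.debug)
        self.assertTrue(options.regression_tests)


if __name__ == "__main__":
    unittest.main()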
diff --git a/test/unittests/bug-862577_corosync.conf b/test/unittests/bug-862577_corosync.conf
new file mode 100644
index 0000000..09b1225
--- /dev/null
+++ b/test/unittests/bug-862577_corosync.conf
@@ -0,0 +1,51 @@
+# Please read the corosync.conf.5 manual page
+
+service {
+ ver: 1
+ name: pacemaker
+}
+totem {
+ version: 2
+ secauth: off
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+
+# Following are old corosync 1.4.x defaults from SLES
+# token: 5000
+# token_retransmits_before_loss_const: 10
+# join: 60
+# consensus: 6000
+# vsftype: none
+# max_messages: 20
+# threads: 0
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.122.2.13
+ mcastaddr: 239.91.185.71
+ mcastport: 5405
+ ttl: 1
+ }
+}
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 2
+}
diff --git a/test/unittests/corosync.conf.1 b/test/unittests/corosync.conf.1
new file mode 100644
index 0000000..7b3abed
--- /dev/null
+++ b/test/unittests/corosync.conf.1
@@ -0,0 +1,81 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ # crypto_cipher and crypto_hash: Used for mutual node authentication.
+ # If you choose to enable this, then do remember to create a shared
+ # secret with "corosync-keygen".
+ # enabling crypto_cipher, requires also enabling of crypto_hash.
+ crypto_cipher: none
+ crypto_hash: none
+
+ # interface: define at least one interface to communicate
+ # over. If you define more than one interface stanza, you must
+ # also set rrp_mode.
+ interface {
+ # Rings must be consecutively numbered, starting at 0.
+ ringnumber: 0
+ # This is normally the *network* address of the
+ # interface to bind to. This ensures that you can use
+ # identical instances of this configuration file
+ # across all your cluster nodes, without having to
+ # modify this option.
+ bindnetaddr: 192.168.1.0
+ # However, if you have multiple physical network
+ # interfaces configured for the same subnet, then the
+ # network address alone is not sufficient to identify
+ # the interface Corosync should bind to. In that case,
+ # configure the *host* address of the interface
+ # instead:
+ # bindnetaddr: 192.168.1.1
+ # When selecting a multicast address, consider RFC
+ # 2365 (which, among other things, specifies that
+ # 239.255.x.x addresses are left to the discretion of
+ # the network administrator). Do not reuse multicast
+ # addresses across multiple Corosync clusters sharing
+ # the same network.
+ mcastaddr: 239.255.1.1
+ # Corosync uses the port you specify here for UDP
+ # messaging, and also the immediately preceding
+ # port. Thus if you set this to 5405, Corosync sends
+ # messages over UDP ports 5405 and 5404.
+ mcastport: 5405
+ # Time-to-live for cluster communication packets. The
+ # number of hops (routers) that this ring will allow
+ # itself to pass. Note that multicast routing must be
+ # specifically enabled on most network routers.
+ ttl: 1
+ }
+}
+
+logging {
+ # Log the source file and line where messages are being
+ # generated. When in doubt, leave off. Potentially useful for
+ # debugging.
+ fileline: off
+ # Log to standard error. When in doubt, set to no. Useful when
+ # running in the foreground (when invoking "corosync -f")
+ to_stderr: no
+ # Log to a log file. When set to "no", the "logfile" option
+ # must not be set.
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ # Log to the system log daemon. When in doubt, set to yes.
+ to_syslog: yes
+ # Log debug messages (very verbose). When in doubt, leave off.
+ debug: off
+ # Log messages with time stamps. When in doubt, set to on
+ # (unless you are only logging to syslog, where double
+ # timestamps can be annoying).
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ #provider: corosync_votequorum
+}
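The corosync.conf fixtures in this directory (bug-862577_corosync.conf above, corosync.conf.1, and the two variants that follow) all use corosync's brace-delimited "key: value" syntax, which test_corosync.py exercises. A stand-alone sketch of a parser for that shape, assuming nothing about crmsh's own implementation:

# Illustration only: parse corosync.conf-style text into nested dicts.
# crmsh ships its own parser (exercised by test_corosync.py); this sketch
# only demonstrates the format used by the fixtures.
def parse_corosync_conf(text):
    root = {}
    stack = []
    current = root
    for raw in text.splitlines():
        line = raw.split("#", 1)[0].strip()      # drop comments and blanks
        if not line:
            continue
        if line.endswith("{"):                   # open a nested section
            name = line[:-1].strip()
            section = {}
            existing = current.get(name)
            if existing is None:
                current[name] = section
            elif isinstance(existing, list):     # repeated section (node, ...)
                existing.append(section)
            else:
                current[name] = [existing, section]
            stack.append(current)
            current = section
        elif line == "}":                        # close the current section
            current = stack.pop()
        elif ":" in line:                        # plain "key: value" entry
            key, value = line.split(":", 1)
            current[key.strip()] = value.strip()
    return root

# Example against corosync.conf.1 above:
#   conf = parse_corosync_conf(open("test/unittests/corosync.conf.1").read())
#   conf["totem"]["interface"]["mcastport"]  ->  "5405"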
diff --git a/test/unittests/corosync.conf.2 b/test/unittests/corosync.conf.2
new file mode 100644
index 0000000..0438451
--- /dev/null
+++ b/test/unittests/corosync.conf.2
@@ -0,0 +1,58 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.16.35.0
+ mcastport: 5405
+ ttl: 1
+ }
+ transport: udpu
+}
+
+logging {
+ fileline: off
+ to_logfile: yes
+ to_syslog: yes
+ logfile: /var/log/cluster/corosync.log
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.16.35.101
+ nodeid: 1
+ }
+
+ node {
+ ring0_addr: 10.16.35.102
+ nodeid: 2
+ }
+
+ node {
+ ring0_addr: 10.16.35.103
+ }
+
+ node {
+ ring0_addr: 10.16.35.104
+ }
+
+ node {
+ ring0_addr: 10.16.35.105
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
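corosync.conf.2 switches to unicast transport (udpu) and an explicit nodelist instead of multicast; note that only the first two nodes carry a nodeid. A test that only needs the configured ring addresses could pull them directly (illustrative snippet; the fixture path is assumed relative to the repository root):

# Illustration: collect the ring0_addr entries from the nodelist fixture.
import re

with open("test/unittests/corosync.conf.2") as f:
    addrs = re.findall(r"ring0_addr:\s*(\S+)", f.read())

# The fixture defines five nodes on the 10.16.35.0/24 network.
assert addrs == ["10.16.35.101", "10.16.35.102", "10.16.35.103",
                 "10.16.35.104", "10.16.35.105"]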
diff --git a/test/unittests/corosync.conf.3 b/test/unittests/corosync.conf.3
new file mode 100644
index 0000000..2cc001f
--- /dev/null
+++ b/test/unittests/corosync.conf.3
@@ -0,0 +1,68 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+ secauth: on
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ token: 5000
+ token_retransmits_before_loss_const: 10
+ join: 60
+ consensus: 6000
+ max_messages: 20
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.67.16.0
+ mcastaddr: 239.23.255.56
+ mcastport: 5405
+ ttl: 1
+ }
+
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 0
+ two_node: 0
+ device {
+ votes: 1
+ model: net
+ net {
+ tls: on
+ host: 10.10.10.3
+ port: 5403
+ algorithm: ffsplit
+ tie_breaker: lowest
+ }
+
+ }
+
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.67.18.221
+ nodeid: 172167901
+ }
+
+}
+
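corosync.conf.3 adds a quorum device (model: net) and a single-entry nodelist, feeding the qdevice-related tests. The pacemaker.log fixture that follows backs the report and log-parsing tests; each timestamped line carries host, daemon, PID, source function, severity and message. A sketch of a regular expression that splits such a line into fields (an illustration of the format only, not the expression crmsh itself uses):

# Illustration only: split one pacemaker.log line into its fields.
import re

LOG_LINE = re.compile(
    r"^(?P<timestamp>\w{3} \d{2} \d{2}:\d{2}:\d{2}) "  # "Apr 03 11:01:18"
    r"(?P<host>\S+) "                                   # "15sp1-1"
    r"(?P<daemon>\S+?)\s*\[(?P<pid>\d+)\] "             # "pacemakerd [1941]"
    r"\((?P<function>[^)]+)\)\s+"                       # "(crm_log_init)"
    r"(?P<level>\w+): "                                 # "info"
    r"(?P<message>.*)$"
)

sample = ("Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) "
          "info: Changed active directory to /var/lib/pacemaker/cores")
fields = LOG_LINE.match(sample).groupdict()
assert fields["daemon"] == "pacemakerd"
assert fields["level"] == "info"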
diff --git a/test/unittests/pacemaker.log b/test/unittests/pacemaker.log
new file mode 100644
index 0000000..1da52a6
--- /dev/null
+++ b/test/unittests/pacemaker.log
@@ -0,0 +1,923 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: Detected an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: Reading configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: Starting Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: server name: pacemakerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry 58f6784c-39df-4fbe-90df-d462a893c0d4/0x55b94329e2e0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1943 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1944 for process pacemaker-fenced
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1945 for process pacemaker-execd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1946 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1947 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1948 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Starting mainloop
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: server name: lrmd
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: Starting
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (qb_ipcs_us_publish) info: server name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: Starting pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: /var/lib/pacemaker/cib/cib.xml not found: No such file or directory
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.xml (digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Primary configuration corrupt or unusable, trying backups in /var/lib/pacemaker/cib
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Continuing with an empty configuration.
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (validate_with_relaxng) info: Creating RNG parser context
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry ce9f3668-a138-4e36-aec8-124d76e0e8b8/0x5649957b59c0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (main) info: Starting up
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (main) info: CRM Git Version: 2.0.1+20190304.9e909a5bd-1.4 (2.0.1+20190304.9e909a5bd)
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_STARTUP received in state S_STARTING from crmd_init
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (startCib) info: CIB Initialization completed successfully
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 6c579ba7-433c-4d00-88f8-a4a9534cd017/0x56042ff25eb0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_ro
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_rw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_shm
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_init) info: Starting pacemaker-based mainloop
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-0.raw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.0.0 of the CIB to disk (digest: 48469f360effdb63efdbbf08822875d8)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.glQmxd (digest: /var/lib/pacemaker/cib/cib.OJiM5q)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: CIB connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry 97d13205-d013-44ab-bd52-01d8ec4132f7/0x55995aef1580 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Cluster connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_erase_attrs) info: Clearing transient attributes from CIB | xpath=//node_state[@uname='15sp1-1']/transient_attributes
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (qb_ipcs_us_publish) info: server name: attrd
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Accepting attribute updates
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-1]: (null) -> 2 from 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_start_election_if_needed) info: Starting an election to determine the writer
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (election_check) info: election-attrd won by local node
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_declare_winner) notice: Recorded local node as attribute writer (was unset)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 1 private change for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes to all (origin=local/attrd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 0e996957-89f6-4cd2-af8f-271088c53399/0x558f48e15840 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes: OK (rc=0, origin=15sp1-1/attrd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now in unknown state
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (setup_cib) info: Watching for stonith topology changes
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (qb_ipcs_us_publish) info: server name: stonith-ng
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (main) info: Starting pacemaker-fenced mainloop
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (init_cib_cache_cb) info: Updating device list from the cib: init
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (cib_devices_update) info: Updating devices to version 0.0.0
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (unpack_nodes) info: Creating a fake local node
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_ha_control) info: Connected to the cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (lrmd_ipc_connect) info: Connecting to executor
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_lrm_control) info: Connection to the executor established
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, no membership data (0000000000100000)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now member (was in unknown state)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (qb_ipcs_us_publish) info: server name: crmd
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) notice: The local CRM is operational
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_PENDING received in state S_STARTING from do_started
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_STARTING -> S_PENDING | input=I_PENDING cause=C_FSA_INTERNAL origin=do_started
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Election Trigger (I_DC_TIMEOUT) just popped (20000ms)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) warning: Input I_DC_TIMEOUT received in state S_PENDING from crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_PENDING -> S_ELECTION | input=I_DC_TIMEOUT cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (election_check) info: election-DC won by local node
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_ELECTION_DC received in state S_ELECTION from election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_ELECTION -> S_INTEGRATION | input=I_ELECTION_DC cause=C_FSA_INTERNAL origin=election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_te_control) info: Registering TE UUID: 305c37e8-0981-497b-a285-c430070e70ae
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (set_graph_functions) info: Setting custom graph functions
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_takeover) info: Taking over DC status for this partition
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_readwrite) info: We are now in R/W mode
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_master operation for section 'all': OK (rc=0, origin=local/crmd/5, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/6)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/6, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/8)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.0.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.1.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config: <cluster_property_set id="cib-bootstrap-options"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </cluster_property_set>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/8, version=0.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/10)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.1.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.2.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.1+20190304.9e909a5bd-1.4-2.0.1+20190304.9e909a5bd"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/10, version=0.2.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/12)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.2.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.3.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 4
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_all) info: join-1: Waiting on 1 outstanding join acks
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (update_dc) info: Set DC to 15sp1-1 (3.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: update_dc: Node 15sp1-1[1] - expected state is now member (was (null))
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/12, version=0.3.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/14)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.3.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=4
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="hacluster"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/14, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 1df7ee72464178ed9ef4d38760c5c496
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.4.0 with 0.4.0 from 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/19, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/21)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/22)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/23)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.1 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="1" uname="15sp1-1"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/21, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/22, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.1 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.2 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="1" uname="15sp1-1" in_ccm="true" crmd="online" crm-debug-origin="do_lrm_query_internal"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm id="1">
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </node_state>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/23, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=27)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition -1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/25)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/26)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/27)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/25, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.2 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.3 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/26, version=0.4.3)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.3 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.4 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4, @have-quorum=1, @dc-uuid=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/27, version=0.4.4)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-1.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.1.0 of the CIB to disk (digest: da12c1ea82516c83c42bbb6af78f7c43)
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.Q2Mefv (digest: /var/lib/pacemaker/cib/cib.lYgCoM)
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Resource start-up disabled since no STONITH resources have been defined
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Either configure some or disable STONITH with the stonith-enabled option
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status_fencing) info: Node 15sp1-1 is active
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 0, saving inputs in /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Configuration errors found during scheduler processing, please run "crm_verify -L" to identify issues
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 0 (ref=pe_calc-dc-1554260501-7) derived from /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 0 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-0.bz2): Complete
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-2.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.4.0 of the CIB to disk (digest: 41a7a4f3446765b9550c8eed97655f87)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.k86jdq (digest: /var/lib/pacemaker/cib/cib.2VIuZJ)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_apply_diff operation for section 'all' to all (origin=local/cibadmin/2)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.4 2
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.0 (null)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=5, @num_updates=0
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="placement-strategy" value="balanced" id="cib-bootstrap-options-placement-strategy"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <rsc_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="rsc-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="migration-threshold" value="3" id="rsc-options-migration-threshold"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </rsc_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <op_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="op-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="timeout" value="600" id="op-options-timeout"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </op_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_apply_diff operation for section 'all': OK (rc=0, origin=15sp1-1/cibadmin/2, version=0.5.0)
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 0 aborted by cib-bootstrap-options-stonith-enabled doing create stonith-enabled=false: Configuration change | cib=0.5.0 source=te_update_diff_v2:499 path=/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options'] complete=true
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-3.raw
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: 80483689fd341b672c06963bb25bdd6b)
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.ZE4jzT (digest: /var/lib/pacemaker/cib/cib.xCbzOh)
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_config) warning: Blind faith: not fencing unseen nodes
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 1, saving inputs in /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 1 (ref=pe_calc-dc-1554260503-8) derived from /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 1 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-1.bz2): Complete
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:47 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 8d4ba34bb3113e36afd6b6bf39fb69a0 for 0.5.0 (0x5604300bf500 0)
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (crm_procfs_pid_of) info: Found pacemaker-based active as process 1943
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) notice: High CPU load detected: 1.040000
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0100 (was ffffffff)
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) info: Moderate CPU load detected: 0.810000
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0010 (was 0100)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 1f91ec8f-7986-4c15-be46-302b53ff3193/0x558f48ea7bf0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry de9bd295-1272-4db2-bdc6-6b1906ae5553/0x55b9435a50e0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/34)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/35)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/34, version=0.5.0)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.0 2
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.1 (null)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="2" in_ccm="true" crmd="offline" crm-debug-origin="post_cache_update"/>
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/35, version=0.5.1)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 82f3fa83-e1aa-4b46-99aa-91c7dda4969a/0x5649958bed90 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry f747eb13-dfe9-4182-9b3e-00d9f416e88e/0x56042fb54140 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry f2f954cd-386a-4993-9142-8621ae195416/0x55995aef4080 for node (null)/2 (2 total)
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_HA_MESSAGE origin=route_message
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 8
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was (null))
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: e5b55e525a867a8154545eca60a3828b
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.1 with 0.5.1 from 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/37, version=0.5.1)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/38)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/39)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.1 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.2 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="2" uname="15sp1-2"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/38, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/39, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/40)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/41)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.2 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.3 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/40, version=0.5.3)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.3 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.4 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/41, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-4.raw
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: bd5d2bae72ccab0f8431984061bf46bf)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/42)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/43)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/42, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.4 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.5 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=5
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=do_lrm_query_internal, @uname=15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/43, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=48)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/46)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/47)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/48)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/46, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.5 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.6 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=6
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/47, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/48, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 2, saving inputs in /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 2 (ref=pe_calc-dc-1554260554-18) derived from /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 2 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-2.bz2): Complete
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.4bNgNm (digest: /var/lib/pacemaker/cib/cib.MRpEpc)
+Apr 03 11:02:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 44ae77e9ff79c954e9d39a4b11a48f55 for 0.5.6 (0x56042ffaf6e0 0)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: handle_request: Node 15sp1-2[2] - expected state is now down (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (handle_shutdown_request) info: Creating shutdown request for 15sp1-2 (state=S_IDLE)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting shutdown[15sp1-2]: (null) -> 1554260567 from 15sp1-1
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Sent CIB request 4 with 1 change for shutdown (id n/a, set n/a)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/attrd/4)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.6 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.7 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=7
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <transient_attributes id="2"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <instance_attributes id="status-2">
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="status-2-shutdown" name="shutdown" value="1554260567"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </instance_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </transient_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 2 aborted by transient_attributes.2 'create': Transient attribute change | cib=0.5.7 source=abort_unless_down:329 path=/cib/status/node_state[@id='2'] complete=true
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/attrd/4, version=0.5.7)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: CIB update 4 result for shutdown: OK | rc=0
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: * shutdown[15sp1-2]=1554260567
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is shutting down
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (sched_shutdown_op) notice: Scheduling shutdown of node 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (LogNodeActions) notice: * Shutdown 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 3, saving inputs in /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 3 (ref=pe_calc-dc-1554260567-19) derived from /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (te_crm_command) info: Executing crm-event (1) without waiting: do_shutdown on 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 3 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-3.bz2): Complete
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is no longer a peer | DC=true old=0x4000000 new=0x0000000
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting transient_attributes status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/transient_attributes
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes to all (origin=local/crmd/51)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: crmd_peer_down: Node 15sp1-2[2] - join-1 phase confirmed -> none
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: do_shutdown of peer 15sp1-2 is in progress | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/52)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.7 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.8 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/transient_attributes[@id='2']
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=8
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-1/crmd/51, version=0.5.8)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.8 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.9 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=9
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=offline, @crm-debug-origin=peer_update_callback, @join=down, @expected=down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/52, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_remove) notice: Removing all 15sp1-2 attributes for peer loss
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_shutdown_req) info: Peer 15sp1-2 is requesting to shut down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now lost (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) notice: do_shutdown of peer 15sp1-2 is complete | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/53)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/56)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/57)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/53, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/56, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.9 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.10 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=10
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=false, @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/57, version=0.5.10)
+Apr 03 11:02:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0001 (was 0010)
+Apr 03 11:02:52 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 721b7cebe807ad0faf4a6dc35780fe91 for 0.5.10 (0x560430039b10 0)
+Apr 03 11:03:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0000 (was 0001)
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member (was lost)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/58)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.10 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.11 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=11
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=peer_update_callback
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/58, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/61)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/62)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/61, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.11 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.12 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=12
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=true, @crm-debug-origin=post_cache_update
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/62, version=0.5.12)
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 joined
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 5c9c833c-faec-4e40-9451-1eca51fe31c1/0x5649958c6240 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 53c0909d-78ff-49b5-bf79-9ef7ceb014aa/0x56042ffb1100 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry cc7d88cd-ec11-4a95-9820-ec156175b0ca/0x55995aef85e0 for node (null)/2 (2 total)
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 (<unknown>) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.12)
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is now a peer | DC=true old=0x0000000 new=0x4000000
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (te_trigger_stonith_history_sync) info: Fence history will be synchronized cluster-wide within 5 seconds
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_FSA_INTERNAL origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: An unknown node joined - (re-)offer to any unconfirmed nodes
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 16
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Skipping 15sp1-1: already known 4
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/63)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.12 2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.13 (null)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=13
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/63, version=0.5.13)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.13)
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_offer_one: Node 15sp1-2[2] - join-1 phase welcomed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was down)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 9cc271d2c23b97671004273302f97501
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.13 with 0.5.13 from 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/65, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/66)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/67)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/68)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/69)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/66, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/67, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.13 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.14 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=14
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/68, version=0.5.14)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.14 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.15 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=15
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/69, version=0.5.15)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/70)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=76)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/71)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.15 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.16 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/lrm[@id='2']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=16
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/70, version=0.5.16)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.16 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.17 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=17
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/71, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/74)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/75)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/76)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/74, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.17 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.18 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=18
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/75, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/76, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 4, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 4 (ref=pe_calc-dc-1554260614-32) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 4 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-5.raw
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: ca3eacfa6d368fd79cf391411a7d16de)
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.F8yvtW (digest: /var/lib/pacemaker/cib/cib.iAVwFF)
+Apr 03 11:03:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 5d5b6ac1becdd43a5327925a8d1f5579 for 0.5.18 (0x56042ffb12f0 0)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 5, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 5 (ref=pe_calc-dc-1554261514-33) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 5 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 6, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 6 (ref=pe_calc-dc-1554262414-34) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 6 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 7, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 7 (ref=pe_calc-dc-1554263314-35) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 7 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 8, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 8 (ref=pe_calc-dc-1554264214-36) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 8 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 9, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 9 (ref=pe_calc-dc-1554265114-37) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 9 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 10, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 10 (ref=pe_calc-dc-1554266014-38) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 10 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 11, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 11 (ref=pe_calc-dc-1554266914-39) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 11 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 12, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 12 (ref=pe_calc-dc-1554267814-40) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 12 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 13, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 13 (ref=pe_calc-dc-1554268714-41) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 13 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: Current ping state: S_IDLE
diff --git a/test/unittests/pacemaker.log.2 b/test/unittests/pacemaker.log.2
new file mode 100644
index 0000000..bd189cc
--- /dev/null
+++ b/test/unittests/pacemaker.log.2
@@ -0,0 +1,3 @@
+Jan 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Jan 03 11:03:41 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Jan 03 11:03:51 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
diff --git a/test/unittests/pacemaker_unicode.log b/test/unittests/pacemaker_unicode.log
new file mode 100644
index 0000000..47aaa31
--- /dev/null
+++ b/test/unittests/pacemaker_unicode.log
@@ -0,0 +1,30 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+� ∀↱
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: ¶ an ⅓ 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: ⚽ configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: → Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615Ḽơᶉëᶆ ȋṕšᶙṁ ḍỡḽǭᵳ ʂǐť ӓṁệẗ, ĉṓɲṩḙċťᶒțûɾ ấɖḯƥĭṩčįɳġ ḝłįʈ, șếᶑ ᶁⱺ ẽḭŭŝḿꝋď ṫĕᶆᶈṓɍ ỉñḉīḑȋᵭṵńť ṷŧ ḹẩḇőꝛế éȶ đꝍꞎôꝛȇ ᵯáꞡᶇā ąⱡîɋṹẵ.
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: 你好 \xf0\x28\x8c\x28: pac我很特殊
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: κόσμε
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: a a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: \xc3\x28 a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: � d�ectory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-sche,̆dulerd[1947] (qb_ipcs_us_publish) info: 𐀀 name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: �����r-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: ������ directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync' ������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: � � � � � � � � � � � � � � � �
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: �����������������������������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster\xa0\xa1 configuration file /var/lib/pacemaker/cib/cib.xml (﷐﷑﷒﷓﷔﷕﷖﷗﷘﷙﷚﷛﷜﷝﷞﷟﷠﷡﷢﷣﷤﷥﷦﷧﷨﷩﷪﷫﷬﷭﷮﷯digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory \xF0\xA4\xAD (2)
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /v \xf0\x90\x8c\xb car/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0,åabc Fired=0, Skipped=0, \xf0\x28\x8c\xbc Incomplete=0, Source=/var/lib/pacemake \xf0\x90\x28\xbcr/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE fro\xf8\xa1\xa1\xa1\xa1m \xf0\x28\x8c\x28notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition \xfc\xa1\xa1\xa1\xa1\xa1S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: \xfc\xa1\xa1\xa1\xa1\xa1 test_unicode
diff --git a/test/unittests/schemas/acls-1.1.rng b/test/unittests/schemas/acls-1.1.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.1.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/acls-1.2.rng b/test/unittests/schemas/acls-1.2.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.2.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.0.rng b/test/unittests/schemas/constraints-1.0.rng
new file mode 100644
index 0000000..5a4474a
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.0.rng
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.1.rng b/test/unittests/schemas/constraints-1.1.rng
new file mode 100644
index 0000000..fff0fb7
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.1.rng
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="rsc-pattern"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ </choice>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="domain"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="node"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </group>
+ </choice>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="first-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="then-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.2.rng b/test/unittests/schemas/constraints-1.2.rng
new file mode 100644
index 0000000..221140c
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.2.rng
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/fencing.rng b/test/unittests/schemas/fencing.rng
new file mode 100644
index 0000000..87de5a8
--- /dev/null
+++ b/test/unittests/schemas/fencing.rng
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-stonith"/>
+ </start>
+
+ <define name="element-stonith">
+ <element name="fencing-topology">
+ <zeroOrMore>
+ <ref name="element-level"/>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-level">
+ <element name="fencing-level">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="target"><text/></attribute>
+ <attribute name="index"><data type="positiveInteger"/></attribute>
+ <attribute name="devices">
+ <data type="string">
+ <param name="pattern">([a-zA-Z0-9_\.\-]+)(,[a-zA-Z0-9_\.\-]+)*</param>
+ </data>
+ </attribute>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/nvset.rng b/test/unittests/schemas/nvset.rng
new file mode 100644
index 0000000..0d7e72c
--- /dev/null
+++ b/test/unittests/schemas/nvset.rng
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nvset"/>
+ </start>
+
+ <define name="element-nvset">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <interleave>
+ <optional>
+ <externalRef href="rule.rng"/>
+ </optional>
+ <zeroOrMore>
+ <element name="nvpair">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </element>
+ </zeroOrMore>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ </interleave>
+ </group>
+ </choice>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.0.rng b/test/unittests/schemas/pacemaker-1.0.rng
new file mode 100644
index 0000000..7100393
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.0.rng
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.0.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.0.rng"/>
+ </element>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.1.rng b/test/unittests/schemas/pacemaker-1.1.rng
new file mode 100644
index 0000000..50e9458
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.1.rng
@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.1.rng"/>
+ </element>
+ <optional>
+ <element name="domains">
+ <zeroOrMore>
+ <element name="domain">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="name"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <element name="constraints">
+ <externalRef href="constraints-1.1.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.1.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.2.rng b/test/unittests/schemas/pacemaker-1.2.rng
new file mode 100644
index 0000000..33a7d2d
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.2.rng
@@ -0,0 +1,146 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.2.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.2.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.2.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
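The pacemaker-1.2 grammar above is self-contained apart from the fragments it pulls in via externalRef (nvset, resources, constraints, acls, fencing), so it can be loaded straight from this schema directory. A minimal sketch of validating a CIB against it with lxml; the lxml dependency, the relative schema path, and the sample document are illustrative assumptions, not part of this change:

    # Sketch only: validate a minimal CIB against pacemaker-1.2.rng with lxml.
    # Assumes lxml is installed and libxml2 resolves the externalRef fragments
    # relative to the schema file's directory.
    from lxml import etree

    schema = etree.RelaxNG(file="test/unittests/schemas/pacemaker-1.2.rng")
    cib = etree.fromstring(
        '<cib validate-with="pacemaker-1.2" admin_epoch="0" epoch="0" num_updates="0">'
        '<configuration>'
        '<crm_config/><nodes/><resources/><constraints/>'
        '</configuration>'
        '<status/>'
        '</cib>'
    )
    assert schema.validate(cib), schema.error_log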
diff --git a/test/unittests/schemas/resources-1.0.rng b/test/unittests/schemas/resources-1.0.rng
new file mode 100644
index 0000000..7ea2228
--- /dev/null
+++ b/test/unittests/schemas/resources-1.0.rng
@@ -0,0 +1,177 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.1.rng b/test/unittests/schemas/resources-1.1.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.1.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.2.rng b/test/unittests/schemas/resources-1.2.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.2.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/rule.rng b/test/unittests/schemas/rule.rng
new file mode 100644
index 0000000..242eff8
--- /dev/null
+++ b/test/unittests/schemas/rule.rng
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-rule"/>
+ </start>
+
+ <define name="element-rule">
+ <element name="rule">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="boolean-op">
+ <choice>
+ <value>or</value>
+ <value>and</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role"><text/></attribute>
+ </optional>
+ <oneOrMore>
+ <choice>
+ <element name="expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="attribute"><text/></attribute>
+ <attribute name="operation">
+ <choice>
+ <value>lt</value>
+ <value>gt</value>
+ <value>lte</value>
+ <value>gte</value>
+ <value>eq</value>
+ <value>ne</value>
+ <value>defined</value>
+ <value>not_defined</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type" ann:defaultValue="string">
+ <choice>
+ <value>string</value>
+ <value>number</value>
+ <value>version</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ <element name="date_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="operation"><value>in_range</value></attribute>
+ <choice>
+ <group>
+ <optional>
+ <attribute name="start"><text/></attribute>
+ </optional>
+ <attribute name="end"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="start"><text/></attribute>
+ <element name="duration">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>gt</value></attribute>
+ <attribute name="start"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="operation"><value>lt</value></attribute>
+ <choice>
+ <attribute name="end"><text/></attribute>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>date_spec</value></attribute>
+ <element name="date_spec">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </element>
+ <ref name="element-rule"/>
+ </choice>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="date-common">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="hours"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="monthdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="yearsdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="months"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weeks"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="years"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekyears"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="moon"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
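Because rule.rng declares the rule element itself as its start pattern, this fragment can also be exercised standalone, unlike the attribute-only fragments such as score.rng. A hedged sketch along the same lines as the CIB example above (the schema path and lxml are again assumptions):

    # Sketch only: a rule with a score and one expression should satisfy rule.rng.
    from lxml import etree

    rule_schema = etree.RelaxNG(file="test/unittests/schemas/rule.rng")
    rule = etree.fromstring(
        '<rule id="r1" score="INFINITY">'
        '<expression id="r1-e1" attribute="#uname" operation="eq" value="node1"/>'
        '</rule>'
    )
    assert rule_schema.validate(rule), rule_schema.error_log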
diff --git a/test/unittests/schemas/score.rng b/test/unittests/schemas/score.rng
new file mode 100644
index 0000000..57b10f2
--- /dev/null
+++ b/test/unittests/schemas/score.rng
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-score"/>
+ </start>
+
+ <define name="attribute-score">
+ <attribute name="score">
+ <choice>
+ <data type="integer"/>
+ <value>INFINITY</value>
+ <value>+INFINITY</value>
+ <value>-INFINITY</value>
+ </choice>
+ </attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/schemas/versions.rng b/test/unittests/schemas/versions.rng
new file mode 100644
index 0000000..ab4e4ea
--- /dev/null
+++ b/test/unittests/schemas/versions.rng
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-version"/>
+ </start>
+
+ <define name="attribute-version">
+ <attribute name="validate-with">
+ <choice>
+ <value>none</value>
+ <value>pacemaker-0.6</value>
+ <value>transitional-0.6</value>
+ <value>pacemaker-0.7</value>
+ <value>pacemaker-1.0</value>
+ <value>pacemaker-1.1</value>
+ <value>pacemaker-1.2</value>
+ </choice>
+ </attribute>
+ <attribute name="admin_epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="num_updates"><data type="nonNegativeInteger"/></attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/scripts/inc1/main.yml b/test/unittests/scripts/inc1/main.yml
new file mode 100644
index 0000000..8c290d3
--- /dev/null
+++ b/test/unittests/scripts/inc1/main.yml
@@ -0,0 +1,22 @@
+version: 2.2
+shortdesc: Include test script 1
+longdesc: Test if includes work ok
+parameters:
+ - name: foo
+ type: boolean
+ shortdesc: An optional feature
+ - name: bar
+ type: string
+ shortdesc: A string of characters
+ value: the name is the game
+ - name: is-required
+ type: int
+ required: true
+actions:
+ - call: ls /tmp
+ when: foo
+ shortdesc: ls
+ - call: "echo '{{foo}}'"
+ shortdesc: foo
+ - call: "echo '{{bar}}'"
+ shortdesc: bar
diff --git a/test/unittests/scripts/inc2/main.yml b/test/unittests/scripts/inc2/main.yml
new file mode 100644
index 0000000..4910696
--- /dev/null
+++ b/test/unittests/scripts/inc2/main.yml
@@ -0,0 +1,26 @@
+---
+- version: 2.2
+ shortdesc: Includes another script
+ longdesc: This one includes another script
+ parameters:
+ - name: wiz
+ type: string
+ - name: foo
+ type: boolean
+ shortdesc: A different foo
+ include:
+ - script: inc1
+ name: included-script
+ parameters:
+ - name: is-required
+ value: 33
+ actions:
+ - call: "echo 'before {{wiz}}'"
+ shortdesc: before wiz
+ - include: included-script
+ - call: "echo 'after {{foo}}'"
+ shortdesc: after foo
+ - cib: |
+ {{included-script:is-required}}
+ - cib: |
+ {{wiz}}
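The {{wiz}}, {{foo}} and {{included-script:is-required}} placeholders above are filled in from the parameter values of the script and of the script it includes. A deliberately simplified illustration of that substitution (this is not crmsh's actual resolver):

    # Sketch only: naive {{name}} / {{script:name}} expansion for illustration.
    import re

    values = {"wiz": "wizzy", "foo": "true", "included-script:is-required": "33"}

    def expand(text, values):
        # Replace each {{key}} with its value; unknown keys are left as-is.
        return re.sub(r"\{\{([^}]+)\}\}", lambda m: values.get(m.group(1), m.group(0)), text)

    assert expand("echo 'before {{wiz}}'", values) == "echo 'before wizzy'"
    assert expand("{{included-script:is-required}}", values) == "33"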
diff --git a/test/unittests/scripts/legacy/main.yml b/test/unittests/scripts/legacy/main.yml
new file mode 100644
index 0000000..ef5d35b
--- /dev/null
+++ b/test/unittests/scripts/legacy/main.yml
@@ -0,0 +1,52 @@
+---
+- name: Initialize a new cluster
+ description: >
+ Initializes a new cluster on the nodes provided. Will try to
+ configure SSH if not already configured, and install missing
+ packages.
+
+ A more user-friendly interface to this script is provided by the
+ cluster init command.
+ parameters:
+ - name: iface
+ description: "Use the given interface. Try to auto-detect interface by default."
+ default: ""
+
+ - name: transport
+ description: "Corosync transport (mcast or udpu)"
+ default: "udpu"
+
+ - name: bindnetaddr
+ description: "Network address to bind to (e.g.: 192.168.1.0)"
+ default: ""
+
+ - name: mcastaddr
+ description: "Multicast address (e.g.: 239.x.x.x)"
+ default: ""
+
+ - name: mcastport
+ description: "Multicast port"
+ default: 5405
+
+ steps:
+ - name: Configure SSH
+ apply_local: configure.py ssh
+
+ - name: Check state of nodes
+ collect: collect.py
+
+ - name: Verify parameters
+ validate: verify.py
+
+ - name: Install packages
+ apply: configure.py install
+
+ - name: Generate corosync authkey
+ apply_local: authkey.py
+
+ - name: Configure cluster nodes
+ apply: configure.py corosync
+
+ - name: Initialize cluster
+ apply_local: init.py
+
diff --git a/test/unittests/scripts/templates/apache.xml b/test/unittests/scripts/templates/apache.xml
new file mode 100644
index 0000000..faf3ef0
--- /dev/null
+++ b/test/unittests/scripts/templates/apache.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<template name="apache">
+
+<shortdesc lang="en">Apache Web Server</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type apache.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this Apache resource in the cluster.
+</longdesc>
+<content type="string" default="apache"/>
+</parameter>
+
+<parameter name="configfile" required="1">
+<shortdesc lang="en">Apache config file</shortdesc>
+<longdesc lang="en">
+Full pathname of the Apache configuration file.
+</longdesc>
+<content type="string" default="/etc/apache2/httpd.conf"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:apache
+ params
+ configfile="<insert param="configfile"/>"
+ op start timeout="40" op stop timeout="60"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
diff --git a/test/unittests/scripts/templates/virtual-ip.xml b/test/unittests/scripts/templates/virtual-ip.xml
new file mode 100644
index 0000000..22ab5bf
--- /dev/null
+++ b/test/unittests/scripts/templates/virtual-ip.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<template name="virtual-ip">
+
+<shortdesc lang="en">Virtual IP Address</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type IPaddr2.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this virtual IP address resource in the cluster.
+</longdesc>
+<content type="string" default="virtual-ip"/>
+</parameter>
+
+<parameter name="ip" required="1">
+<shortdesc lang="en">IP address</shortdesc>
+<longdesc lang="en">
+The IPv4 address to be configured in dotted quad notation,
+for example "192.168.1.1".
+</longdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="netmask">
+<shortdesc lang="en">Netmask</shortdesc>
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0).
+
+If unspecified, it will be determined automatically.
+</longdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="lvs_support">
+<shortdesc lang="en">LVS support</shortdesc>
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. In case an IP
+address is stopped, only move it to the loopback device to allow the
+local node to continue to service requests, but no longer advertise it
+on the network.
+</longdesc>
+<content type="boolean"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:IPaddr2
+ params
+ ip="<insert param="ip"/>"
+ <if set="netmask">cidr_netmask="<insert param="netmask"/>"</if>
+ <if set="lvs_support">lvs_support="<insert param="lvs_support"/>"</if>
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
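The if-set elements in the crm_script block above only emit their content when the corresponding parameter was actually given a value, so an IP-only invocation renders without the cidr_netmask and lvs_support settings. A rough, purely illustrative rendering of that behaviour (not the real template engine, and the sample values are made up):

    # Sketch only: how the optional parameters affect the rendered primitive.
    params = {"id": "virtual-ip", "ip": "192.168.1.1", "netmask": "", "lvs_support": ""}
    line = 'primitive %(id)s ocf:heartbeat:IPaddr2 params ip="%(ip)s"' % params
    if params["netmask"]:
        line += ' cidr_netmask="%(netmask)s"' % params
    if params["lvs_support"]:
        line += ' lvs_support="%(lvs_support)s"' % params
    line += ' op start timeout="20" op stop timeout="20" op monitor interval="10" timeout="20"'
    print(line)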
diff --git a/test/unittests/scripts/unified/main.yml b/test/unittests/scripts/unified/main.yml
new file mode 100644
index 0000000..29f5d07
--- /dev/null
+++ b/test/unittests/scripts/unified/main.yml
@@ -0,0 +1,26 @@
+version: 2.2
+shortdesc: Unified Script
+longdesc: >
+ Test if we can define multiple steps in a single script
+category: test
+steps:
+ - parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: Identifier
+ - name: vip
+ shortdesc: Configure the virtual IP
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: IP Identifier
+ - name: ip
+ type: ip_address
+ required: true
+ shortdesc: The IP Address
+actions:
+ - cib: |
+ primitive {{vip:id}} IPaddr2 ip={{vip:ip}}
+ group g-{{id}} {{id}} {{vip:id}}
diff --git a/test/unittests/scripts/v2/main.yml b/test/unittests/scripts/v2/main.yml
new file mode 100644
index 0000000..41822a2
--- /dev/null
+++ b/test/unittests/scripts/v2/main.yml
@@ -0,0 +1,46 @@
+---
+- version: 2.2
+ shortdesc: Apache Webserver
+ longdesc: >
+ Configure a resource group containing a virtual IP address and
+ an instance of the Apache web server.
+ category: Server
+ parameters:
+ - name: id
+ shortdesc: The ID specified here is for the web server resource group.
+ - name: install
+ type: boolean
+ value: true
+ shortdesc: Disable if no installation should be performed
+ include:
+ - agent: test:apache
+ parameters:
+ - name: id
+ value: "{{id}}-server"
+ - name: configfile
+ type: file
+ ops: |
+ op monitor interval=20s timeout=20s
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ value: "{{id}}-ip"
+ - name: ip
+ type: ip_address
+ ops: |
+ op monitor interval=20s timeout=20s
+ actions:
+ - install:
+ - apache2
+ when: install
+ - call: a2enable mod_status
+ shortdesc: Enable status module
+ nodes: all
+ when: install
+ - cib: |
+ {{virtual-ip}}
+ {{apache}}
+ group {{id}}
+ {{virtual-ip:id}}
+ {{apache:id}}
diff --git a/test/unittests/scripts/vip/main.yml b/test/unittests/scripts/vip/main.yml
new file mode 100644
index 0000000..4f3bde1
--- /dev/null
+++ b/test/unittests/scripts/vip/main.yml
@@ -0,0 +1,28 @@
+---
+- version: 2.2
+ shortdesc: Virtual IP
+ category: Basic
+ include:
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: ip
+ type: ip_address
+ required: true
+ - name: cidr_netmask
+ type: integer
+ required: false
+ - name: broadcast
+ type: ipaddress
+ required: false
+ - name: lvs_support
+ required: false
+ type: boolean
+ ops: |
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+ actions:
+ - include: virtual-ip
diff --git a/test/unittests/scripts/vipinc/main.yml b/test/unittests/scripts/vipinc/main.yml
new file mode 100644
index 0000000..6741885
--- /dev/null
+++ b/test/unittests/scripts/vipinc/main.yml
@@ -0,0 +1,14 @@
+version: 2.2
+category: Test
+shortdesc: Test script include
+include:
+ - script: vip
+ parameters:
+ - name: id
+ value: vip1
+ - name: ip
+ value: 192.168.200.100
+actions:
+ - include: vip
+ - cib: |
+ clone c-{{vip:id}} {{vip:id}}
diff --git a/test/unittests/scripts/workflows/10-webserver.xml b/test/unittests/scripts/workflows/10-webserver.xml
new file mode 100644
index 0000000..f18d55a
--- /dev/null
+++ b/test/unittests/scripts/workflows/10-webserver.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<workflow name="10-webserver">
+
+<shortdesc lang="en">Web Server</shortdesc>
+<longdesc lang="en">
+Configure a resource group containing a virtual IP address and
+an instance of the Apache web server. You may wish to use this
+in conjunction with a filesystem resource; in this case you will
+need to configure the filesystem separately and then add colocation
+and ordering constraints to have it start before the resource
+group you create here.
+</longdesc>
+
+<parameters>
+<stepdesc lang="en">
+The ID specified here is for the web server resource group.
+</stepdesc>
+<parameter name="id" required="1">
+<shortdesc lang="en">Group ID</shortdesc>
+<longdesc lang="en">
+Unique ID for the web server resource group in the cluster.
+</longdesc>
+<content type="string" default="web-server"/>
+</parameter>
+</parameters>
+
+<templates>
+<template name="virtual-ip" required="1">
+<stepdesc lang="en">
+The IP address configured here will start before the Apache instance.
+</stepdesc>
+</template>
+<template name="apache" required="1">
+<stepdesc lang="en">
+The Apache configuration file specified here must be available via the
+same path on all cluster nodes, and Apache must be configured with
+mod_status enabled. If in doubt, try running Apache manually via
+its init script first, and ensure http://localhost:80/server-status is
+accessible.
+</stepdesc>
+</template>
+</templates>
+
+<crm_script>
+group <insert param="id"/>
+ <insert param="id" from_template="virtual-ip"/>
+ <insert param="id" from_template="apache"/>
+</crm_script>
+
+</workflow>
diff --git a/test/unittests/test.conf b/test/unittests/test.conf
new file mode 100644
index 0000000..fe75686
--- /dev/null
+++ b/test/unittests/test.conf
@@ -0,0 +1,12 @@
+[path]
+sharedir = ../../doc
+cache = ../../doc
+crm_config = .
+crm_daemon_dir = .
+crm_daemon_user = hacluster
+ocf_root = .
+crm_dtd_dir = .
+pe_state_dir = .
+heartbeat_dir = .
+hb_delnode = ./hb_delnode
+nagios_plugins = .
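test.conf is a plain INI file that points the test run at local stand-ins for the usual system paths. It can be read with the standard library, for example (the relative path here is an assumption for illustration):

    # Sketch only: reading the [path] section with configparser.
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read("test/unittests/test.conf")
    assert cfg["path"]["crm_daemon_user"] == "hacluster"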
diff --git a/test/unittests/test_bootstrap.py b/test/unittests/test_bootstrap.py
new file mode 100644
index 0000000..45bf03d
--- /dev/null
+++ b/test/unittests/test_bootstrap.py
@@ -0,0 +1,1905 @@
+"""
+Unit tests for crmsh/bootstrap.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2019-10-21
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import subprocess
+import unittest
+import yaml
+import socket
+
+import crmsh.sh
+import crmsh.ssh_key
+import crmsh.user_of_host
+import crmsh.utils
+from crmsh.ui_node import NodeMgmt
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import constants
+from crmsh import qdevice
+
+
+class TestContext(unittest.TestCase):
+ """
+    Unit tests for crmsh.bootstrap.Context
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ctx_inst = bootstrap.Context()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.bootstrap.Context.initialize_user')
+ def test_set_context(self, mock_initialize_user: mock.MagicMock):
+ options = mock.Mock(yes_to_all=True, ipv6=False)
+ ctx = self.ctx_inst.set_context(options)
+ self.assertEqual(ctx.yes_to_all, True)
+ self.assertEqual(ctx.ipv6, False)
+ mock_initialize_user.assert_called_once()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_return(self, mock_qdevice):
+ self.ctx_inst.initialize_qdevice()
+ mock_qdevice.assert_not_called()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user=None, algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_with_user(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "alice@node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user='alice', algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_together(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.sbd_devices = ["/dev/sda1"]
+ ctx.diskless_sbd = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't use -s and -S options together")
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_sbd_stage_no_option(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.yes_to_all = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Stage sbd should specify sbd device by -s or diskless sbd by -S option")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage_service(self, mock_active, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ mock_active.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't configure stage sbd: sbd.service already running! Please use crm option '-F' if need to redeploy")
+ mock_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage(self, mock_active, mock_check_all):
+ options = mock.Mock(stage="sbd", diskless_sbd=True, cluster_is_running=True)
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ ctx.cluster_is_running = True
+ mock_active.return_value = False
+ ctx._validate_sbd_option()
+ mock_active.assert_called_once_with("sbd.service")
+ mock_check_all.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_option_error_nic_number(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.nic_list = ["eth1", "eth2", "eth3"]
+ with self.assertRaises(SystemExit):
+ ctx.validate_option()
+ mock_error.assert_called_once_with("Maximum number of interface is 2")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ def test_validate_cluster_node_same_name(self, mock_ip_in_local, mock_gethost, mock_fatal):
+ options = mock.Mock(cluster_node="me", type="join")
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "me"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
+ mock_gethost.return_value = ("10.10.10.41", None)
+ mock_ip_in_local.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with("Please specify peer node's hostname or IP address")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ def test_validate_cluster_node_unknown_name(self, mock_gethost, mock_fatal):
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "xxxx"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
+ mock_gethost.side_effect = socket.gaierror("gethostbyname error")
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with('"xxxx": gethostbyname error')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.Validation.valid_admin_ip')
+ def test_validate_option(self, mock_admin_ip, mock_warn):
+ ctx = crmsh.bootstrap.Context()
+ ctx.admin_ip = "10.10.10.123"
+ ctx.qdevice_inst = mock.Mock()
+ ctx._validate_sbd_option = mock.Mock()
+ ctx._validate_nodes_option = mock.Mock()
+ ctx.validate_option()
+ mock_admin_ip.assert_called_once_with("10.10.10.123")
+ ctx.qdevice_inst.valid_qdevice_options.assert_called_once_with()
+ ctx._validate_sbd_option.assert_called_once_with()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_return(self, mock_status):
+ res = self.ctx_inst.load_specific_profile(None)
+ assert res == {}
+ mock_status.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_not_exist(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("newname")
+ assert res == {}
+ mock_status.assert_called_once_with("\"newname\" profile does not exist in {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("name")
+ assert res == "test"
+ mock_status.assert_called_once_with("Loading \"name\" profile from {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform_s390(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="s390")
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, bootstrap.Context.S390_PROFILE_NAME)
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_not_called()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="xxx")
+ mock_cloud.return_value = "azure"
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, "azure")
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_called_once_with()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_not_exist(self, mock_platform, mock_exists):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = False
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_empty(self, mock_platform, mock_exists, mock_open_file, mock_load):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = ""
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+
+ @mock.patch('crmsh.bootstrap.Context.load_specific_profile')
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file(self, mock_platform, mock_exists, mock_open_file, mock_load, mock_load_specific):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = "data"
+ mock_load_specific.side_effect = [
+ {"name": "xin", "age": 18},
+ {"name": "wang"}
+ ]
+
+ self.ctx_inst.load_profiles()
+ assert self.ctx_inst.profiles_dict == {"name": "wang", "age": 18}
+
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+ mock_load_specific.assert_has_calls([
+ mock.call(bootstrap.Context.DEFAULT_PROFILE_NAME),
+ mock.call("s390")
+ ])
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'alice'
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+
+class TestBootstrap(unittest.TestCase):
+ """
+    Unit tests for crmsh/bootstrap.py
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.parallax.parallax_call')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.sbd.SBDTimeout.is_sbd_delay_start')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_start_pacemaker(self, mock_installed, mock_enabled, mock_delay_start, mock_start, mock_parallax_call):
+ bootstrap._context = None
+ mock_installed.return_value = True
+ mock_enabled.return_value = True
+ mock_delay_start.return_value = True
+ node_list = ["node1", "node2", "node3", "node4", "node5", "node6"]
+ bootstrap.start_pacemaker(node_list)
+ mock_start.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("corosync.service", remote_addr="node2"),
+ mock.call("corosync.service", remote_addr="node3"),
+ mock.call("corosync.service", remote_addr="node4"),
+ mock.call("corosync.service", remote_addr="node5"),
+ mock.call("corosync.service", remote_addr="node6"),
+ mock.call("pacemaker.service", enable=False, node_list=node_list)
+ ])
+ mock_parallax_call.assert_has_calls([
+ mock.call(node_list, 'mkdir -p /run/systemd/system/sbd.service.d/'),
+ mock.call(node_list, "echo -e '[Service]\nUnsetEnvironment=SBD_DELAY_START' > /run/systemd/system/sbd.service.d/sbd_delay_start_disabled.conf"),
+ mock.call(node_list, "systemctl daemon-reload"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_init_ssh(self, mock_start_service, mock_config_ssh):
+ bootstrap._context = mock.Mock(current_user="alice", user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.init_ssh()
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("alice")
+ ])
+
+ @mock.patch('crmsh.userdir.gethomedir')
+ def test_key_files(self, mock_gethome):
+ mock_gethome.return_value = "/root"
+ expected_res = {"private": "/root/.ssh/id_rsa", "public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
+ self.assertEqual(bootstrap.key_files("root"), expected_res)
+ mock_gethome.assert_called_once_with("root")
+
+ @mock.patch('builtins.open')
+ def test_is_nologin(self, mock_open_file):
+ data = "hacluster:x:90:90:heartbeat processes:/var/lib/heartbeat/cores/hacluster:/sbin/nologin"
+ mock_open_file.return_value = mock.mock_open(read_data=data).return_value
+ assert bootstrap.is_nologin("hacluster") is not None
+ mock_open_file.assert_called_once_with("/etc/passwd")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell_return(self, mock_nologin, mock_status, mock_confirm):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_nologin.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_confirm.assert_called_once_with("Continue?")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell(self, mock_nologin, mock_invoke):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ mock_nologin.return_value = True
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_invoke.assert_called_once_with("usermod -s /bin/bash hacluster", None)
+
+ @mock.patch('crmsh.sh.LocalShell.su_subprocess_run')
+ def test_generate_ssh_key_pair_on_remote(self, mock_su: mock.MagicMock):
+ mock_su.return_value = mock.Mock(returncode=0, stdout=b'')
+ bootstrap.generate_ssh_key_pair_on_remote('local_sudoer', 'remote_host', 'remote_sudoer', 'remote_user')
+ mock_su.assert_has_calls([
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='''
+[ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C "Cluster internal on $(hostname)" -N ''
+[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub
+'''.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ ),
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='cat ~/.ssh/id_rsa.pub'.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ ),
+ ])
+
+ @mock.patch('crmsh.bootstrap.append_unique')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.detect_file')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ def _test_configure_ssh_key(self, mock_change_shell, mock_key_files, mock_detect, mock_su, mock_append_unique):
+ mock_key_files.return_value = {"private": "/test/.ssh/id_rsa", "public": "/test/.ssh/id_rsa.pub", "authorized": "/test/.ssh/authorized_keys"}
+ mock_detect.side_effect = [True, True, False]
+
+ bootstrap.configure_ssh_key("test")
+
+ mock_change_shell.assert_called_once_with("test")
+ mock_key_files.assert_called_once_with("test")
+ mock_detect.assert_has_calls([
+ mock.call("/test/.ssh/id_rsa"),
+ mock.call("/test/.ssh/id_rsa.pub"),
+ mock.call("/test/.ssh/authorized_keys")
+ ])
+ mock_append_unique.assert_called_once_with("/test/.ssh/id_rsa.pub", "/test/.ssh/authorized_keys", "test")
+ mock_su.assert_called_once_with('test', 'touch /test/.ssh/authorized_keys')
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ def test_configure_ssh_key(self, mock_ensure_key_pair, mock_add):
+ public_key = crmsh.ssh_key.InMemoryPublicKey('foo')
+ mock_ensure_key_pair.return_value = (True, [public_key])
+ bootstrap.configure_ssh_key('alice')
+ mock_ensure_key_pair.assert_called_once_with(None, 'alice')
+ mock_add.assert_called_once_with(None, 'alice', public_key)
+
+ @mock.patch('crmsh.bootstrap.append_to_remote_file')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique_remote(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile", user="root", remote="node1", from_local=True)
+ mock_check.assert_called_once_with("fromfile", "tofile", remote="node1", source_local=True)
+ mock_append.assert_called_once_with("fromfile", "root", "node1", "tofile")
+
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile")
+ mock_check.assert_called_once_with("fromfile", "tofile", remote=None, source_local=False)
+ mock_append.assert_called_once_with("fromfile", "tofile", remote=None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_append_to_remote_file(self, mock_run):
+ bootstrap.append_to_remote_file("fromfile", "root", "node1", "tofile")
+ cmd = "cat fromfile | ssh {} root@node1 'cat >> tofile'".format(constants.SSH_OPTION)
+ mock_run.assert_called_once_with(cmd)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_join_ssh_no_seed_host(self, mock_error):
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh(None, None)
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key_for_secondary_user')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh(
+ self,
+ mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change, mock_swap_2,
+ ):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
+ mock_ssh_copy_id.return_value = 0
+
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ mock.call("hacluster"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_called_once_with("node1", "bob", "alice", "bob", "alice", add=True)
+ mock_invoke.assert_called_once_with(
+ "bob",
+ "ssh {} alice@node1 sudo crm cluster init -i eth1 ssh_remote".format(constants.SSH_OPTION),
+ )
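+ # Only the peer host and user are asserted here; the first positional argument is a shell object that is not compared directly.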
+ mock_swap_2.assert_called_once()
+ args, kwargs = mock_swap_2.call_args
+ self.assertEqual(3, len(args))
+ self.assertEqual('node1', args[1])
+ self.assertEqual('hacluster', args[2])
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFile.public_key')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.list_public_key_for_user')
+ @mock.patch('logging.Logger.info')
+ def test_swap_public_ssh_key_for_secondary_user(
+ self,
+ mock_log_info,
+ mock_list_public_key_for_user,
+ mock_ensure_key_pair_exists_for_user,
+ mock_public_key,
+ mock_authorized_key_manager_add,
+ ):
+ mock_shell = mock.Mock(
+ crmsh.sh.ClusterShell,
+ local_shell=mock.Mock(crmsh.sh.LocalShell),
+ user_of_host=mock.Mock(crmsh.user_of_host.UserOfHost),
+ )
+ mock_list_public_key_for_user.return_value = ['~/.ssh/id_rsa', '~/.ssh/id_ed25519']
+ mock_ensure_key_pair_exists_for_user.return_value = (True, [
+ crmsh.ssh_key.InMemoryPublicKey('foo'),
+ crmsh.ssh_key.InMemoryPublicKey('bar'),
+ ])
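+ # The True flag reports that a fresh key pair was generated, which the log assertion at the end of this test expects.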
+ mock_public_key.return_value = 'public_key'
+ crmsh.bootstrap.swap_public_ssh_key_for_secondary_user(mock_shell, 'node1', 'alice')
+ mock_list_public_key_for_user.assert_called_once_with(None, 'alice')
+ mock_ensure_key_pair_exists_for_user.assert_called_once_with('node1', 'alice')
+ mock_authorized_key_manager_add.assert_has_calls([
+ mock.call(None, 'alice', crmsh.ssh_key.InMemoryPublicKey('foo')),
+ mock.call('node1', 'alice', crmsh.ssh_key.KeyFile('~/.ssh/id_rsa')),
+ ])
+ mock_log_info.assert_called_with("A new ssh keypair is generated for user %s@%s.", 'alice', 'node1')
+
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh_bad_credential(self, mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
+ mock_ssh_copy_id.return_value = 255
+
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_not_called()
+ mock_invoke.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key_exception(self, mock_check_passwd, mock_warn, mock_export_ssh_key, mock_import_ssh):
+ mock_check_passwd.return_value = False
+ mock_import_ssh.side_effect = ValueError("Can't get the remote id_rsa.pub from {}: {}")
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+ mock_warn.assert_called_once_with(mock_import_ssh.side_effect)
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key(self, mock_check_passwd, mock_export_ssh, mock_import_ssh):
+ mock_check_passwd.return_value = True
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_export_ssh.assert_called_once_with("bob", "bob", "node1", "alice", "alice")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add_return(self, mock_this_node):
+ ctx = mock.Mock(user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.bootstrap_add(ctx)
+ mock_this_node.assert_not_called()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add(self, mock_this_node, mock_info, mock_run):
+ ctx = mock.Mock(current_user="alice", user_at_node_list=["bob@node2", "carol@node3"], nic_list=["eth1"], use_ssh_agent=False)
+ mock_this_node.return_value = "node1"
+ bootstrap.bootstrap_add(ctx)
+ mock_info.assert_has_calls([
+ mock.call("Adding node node2 to cluster"),
+ mock.call("Running command on node2: crm cluster join -y -i eth1 -c alice@node1"),
+ mock.call("Adding node node3 to cluster"),
+ mock.call("Running command on node3: crm cluster join -y -i eth1 -c alice@node1")
+ ])
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_nodelist(self, mock_run, mock_error):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_run.return_value = (1, None, None)
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_called_once_with('node1', 'crm_node -l')
+ mock_error.assert_called_once_with("Can't fetch cluster nodes list from node1: None")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_hostname(
+ self,
+ mock_run,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_error,
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ out_node_list = """1 node1 member
+ 2 node2 member"""
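+ # crm_node -l succeeds with the node list above; the follow-up hostname query fails.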
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (1, None, None)
+ ]
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_error.assert_called_once_with("Can't fetch hostname of node1: None")
+
+ @mock.patch('crmsh.bootstrap.swap_key_for_hacluster')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.utils.ssh_copy_id')
+ @mock.patch('crmsh.utils.user_of')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes(
+ self,
+ mock_run,
+ mock_swap,
+ mock_userof,
+ mock_ssh_copy_id: mock.MagicMock,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_change_shell,
+ mock_swap_hacluster
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_userof.return_value = "bob"
+ out_node_list = """1 node1 member
+ 2 node2 member"""
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (0, "node1", None)
+ ]
+
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_userof.assert_called_once_with("node2")
+ mock_ssh_copy_id.assert_has_calls([
+ mock.call('carol', 'bob', 'node2')
+ ])
+ mock_swap.assert_has_calls([
+ mock.call('node2', "carol", "bob", "carol", "bob"),
+ mock.call('node2', 'hacluster', 'hacluster', 'carol', 'bob', add=True)
+ ])
+
+ @mock.patch('crmsh.userdir.getuser')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('os.path.join')
+ @mock.patch('os.path.exists')
+ def test_init_ssh_remote_no_sshkey(self, mock_exists, mock_join, mock_append, mock_open_file, mock_key_files, mock_getuser):
+ mock_getuser.return_value = "alice"
+ mock_key_files.return_value = {"private": "/home/alice/.ssh/id_rsa", "public": "/home/alice/.ssh/id_rsa.pub", "authorized": "/home/alice/.ssh/authorized_keys"}
+ mock_exists.side_effect = [False, True, False, False, False]
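+ # authorized_keys does not exist yet; of the probed key types only id_rsa is present.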
+ mock_join.side_effect = ["/home/alice/.ssh/id_rsa",
+ "/home/alice/.ssh/id_dsa",
+ "/home/alice/.ssh/id_ecdsa",
+ "/home/alice/.ssh/id_ed25519"]
+ mock_open_file.side_effect = [
+ mock.mock_open().return_value,
+ mock.mock_open(read_data="data1 data2").return_value,
+ mock.mock_open(read_data="data1111").return_value
+ ]
+
+ bootstrap.init_ssh_remote()
+
+ mock_getuser.assert_called_once_with()
+ mock_key_files.assert_called_once_with("alice")
+
+ mock_open_file.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys", 'w'),
+ mock.call("/home/alice/.ssh/authorized_keys", "r+"),
+ mock.call("/home/alice/.ssh/id_rsa.pub")
+ ])
+ mock_exists.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys"),
+ mock.call("/home/alice/.ssh/id_rsa"),
+ mock.call("/home/alice/.ssh/id_dsa"),
+ mock.call("/home/alice/.ssh/id_ecdsa"),
+ mock.call("/home/alice/.ssh/id_ed25519"),
+ ])
+ mock_append.assert_called_once_with("/home/alice/.ssh/id_rsa.pub", "/home/alice/.ssh/authorized_keys")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname(self, mock_run):
+ mock_run.return_value = (0, "Node1", None)
+
+ peer_node = bootstrap.get_node_canonical_hostname('node1')
+ self.assertEqual('Node1', peer_node)
+ mock_run.assert_called_once_with('node1', 'crm_node --name')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.get_node_canonical_hostname('node1')
+
+ mock_run.assert_called_once_with("node1", "crm_node --name")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_local_offline(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node='node2')
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = False
+
+ assert bootstrap.is_online() is False
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_on_init_node(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node=None)
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = True
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_peer_offline(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node1')
+ mock_is_online.side_effect = [True, False]
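+ # The local node reports online but the peer does not, so the original corosync.conf is restored and corosync is stopped.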
+ bootstrap.COROSYNC_CONF_ORIG = "/tmp/crmsh_tmpfile"
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node1"
+ mock_corosync_conf.side_effect = ["/etc/corosync/corosync.conf",
+ "/etc/corosync/corosync.conf"]
+
+ bootstrap.is_online()
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node1')
+ mock_corosync_conf.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_copy.assert_called_once_with(bootstrap.COROSYNC_CONF_ORIG, "/etc/corosync/corosync.conf")
+ mock_csync2.assert_called_once_with("/etc/corosync/corosync.conf")
+ mock_stop_service.assert_called_once_with("corosync")
+ mock_error.assert_called_once_with("Cannot see peer node \"node1\", please check the communication IP")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_both_online(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node2')
+ mock_is_online.side_effect = [True, True]
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node2"
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node2')
+ mock_corosync_conf.assert_not_called()
+ mock_copy.assert_not_called()
+ mock_csync2.assert_not_called()
+ mock_stop_service.assert_not_called()
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update_no_conflicts(self, mock_invoke, mock_invokerc):
+ mock_invokerc.return_value = True
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_called_once_with("csync2 -rm /etc/corosync.conf")
+ mock_invokerc.assert_called_once_with("csync2 -rxv /etc/corosync.conf")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update(self, mock_invoke, mock_invokerc, mock_warn):
+ mock_invokerc.side_effect = [False, False]
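+ # Both csync2 -rxv attempts fail, so a forced flush (csync2 -rf) is attempted and a warning is logged.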
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_has_calls([
+ mock.call("csync2 -rm /etc/corosync.conf"),
+ mock.call("csync2 -rf /etc/corosync.conf")
+ ])
+ mock_invokerc.assert_has_calls([
+ mock.call("csync2 -rxv /etc/corosync.conf"),
+ mock.call("csync2 -rxv /etc/corosync.conf")
+ ])
+ mock_warn.assert_called_once_with("/etc/corosync.conf was not synced")
+
+ @mock.patch('crmsh.utils.InterfacesInfo')
+ def test_init_network(self, mock_interfaces):
+ mock_interfaces_inst = mock.Mock()
+ mock_interfaces.return_value = mock_interfaces_inst
+ mock_interfaces_inst.get_default_nic_list_from_route.return_value = ["eth0", "eth1"]
+ bootstrap._context = mock.Mock(ipv6=False, second_heartbeat=False, nic_list=["eth0", "eth1"], default_nic_list=["eth0", "eth1"])
+
+ bootstrap.init_network()
+
+ mock_interfaces.assert_called_once_with(False, False, bootstrap._context.nic_list)
+ mock_interfaces_inst.get_interfaces_info.assert_called_once_with()
+ mock_interfaces_inst.get_default_nic_list_from_route.assert_called_once_with()
+ mock_interfaces_inst.get_default_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.disable_service')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_no_config(self, mock_status, mock_disable):
+ bootstrap._context = mock.Mock(qdevice_inst=None)
+ bootstrap.init_qdevice()
+ mock_status.assert_not_called()
+ mock_disable.assert_called_once_with("corosync-qdevice.service")
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_copy_ssh_key_failed(
+ self,
+ mock_status, mock_check_ssh_passwd_need,
+ mock_configure_ssh_key, mock_ssh_copy_id, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_check_ssh_passwd_need.return_value = True
+ mock_ssh_copy_id.return_value = 255
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+
+ with self.assertRaises(ValueError):
+ bootstrap.init_qdevice()
+
+ mock_status.assert_has_calls([
+ mock.call("Configure Qdevice/Qnetd:"),
+ ])
+ mock_check_ssh_passwd_need.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_called_once_with('bob')
+ mock_ssh_copy_id.assert_called_once_with('bob', 'bob', '10.10.10.123')
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_already_configured(
+ self,
+ mock_status, mock_ssh, mock_configure_ssh_key,
+ mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
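+ # The user declines to overwrite the existing configuration, so only the existing qdevice service is started.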
+
+ bootstrap.init_qdevice()
+
+ mock_status.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Qdevice is already configured - overwrite?")
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice(self, mock_info, mock_ssh, mock_configure_ssh_key, mock_qdevice_configured,
+ mock_this_node, mock_list_nodes, mock_adjust_priority, mock_adjust_fence_delay,
+ mock_user_of_host, mock_host_user_config_class):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_this_node.return_value = "192.0.2.100"
+ mock_list_nodes.return_value = []
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = False
+ self.qdevice_with_ip.set_cluster_name = mock.Mock()
+ self.qdevice_with_ip.valid_qnetd = mock.Mock()
+ self.qdevice_with_ip.config_and_start_qdevice = mock.Mock()
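+ # The QDevice instance methods are stubbed so only the bootstrap-side orchestration is exercised here.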
+
+ bootstrap.init_qdevice()
+
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_host_user_config_class.return_value.add.assert_has_calls([
+ mock.call('bob', '192.0.2.100'),
+ mock.call('bob', '10.10.10.123'),
+ ])
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ self.qdevice_with_ip.set_cluster_name.assert_called_once_with()
+ self.qdevice_with_ip.valid_qnetd.assert_called_once_with()
+ self.qdevice_with_ip.config_and_start_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_service_not_available(
+ self,
+ mock_info, mock_list_nodes, mock_available,
+ mock_host_user_config_class,
+ mock_fatal,
+ ):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
+ mock_list_nodes.return_value = ["node1"]
+ mock_available.return_value = False
+ mock_fatal.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.init_qdevice()
+
+ mock_host_user_config_class.return_value.save_local.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_not_called()
+ mock_fatal.assert_called_once_with("corosync-qdevice.service is not available on node1")
+ mock_available.assert_called_once_with("corosync-qdevice.service", "node1")
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ def test_configure_qdevice_interactive_return(self, mock_prompt):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ bootstrap.configure_qdevice_interactive()
+ mock_prompt.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_confirm(self, mock_confirm, mock_info):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = False
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_installed(self, mock_confirm, mock_info, mock_installed, mock_error):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.side_effect = [True, False]
+ mock_installed.side_effect = ValueError("corosync-qdevice not installed")
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_has_calls([
+ mock.call("Do you want to configure QDevice?"),
+ mock.call("Please install the package manually and press 'y' to continue")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive(self, mock_confirm, mock_info, mock_installed, mock_prompt, mock_qdevice):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_prompt.side_effect = ["alice@qnetd-node", 5403, "ffsplit", "lowest", "on", None]
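+ # Prompt answers, in order: QNetd host, TCP port, algorithm, tie breaker, TLS mode, heuristics command (none).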
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+ mock_prompt.assert_has_calls([
+ mock.call("HOST or IP of the QNetd server to be used",
+ valid_func=qdevice.QDevice.check_qnetd_addr),
+ mock.call("TCP PORT of QNetd server", default=5403,
+ valid_func=qdevice.QDevice.check_qdevice_port),
+ mock.call("QNetd decision ALGORITHM (ffsplit/lms)", default="ffsplit",
+ valid_func=qdevice.QDevice.check_qdevice_algo),
+ mock.call("QNetd TIE_BREAKER (lowest/highest/valid node id)", default="lowest",
+ valid_func=qdevice.QDevice.check_qdevice_tie_breaker),
+ mock.call("Whether using TLS on QDevice/QNetd (on/off/required)", default="on",
+ valid_func=qdevice.QDevice.check_qdevice_tls),
+ mock.call("Heuristics COMMAND to run with absolute path; For multiple commands, use \";\" to separate",
+ valid_func=qdevice.QDevice.check_qdevice_heuristics,
+ allow_empty=True)
+ ])
+ mock_qdevice.assert_called_once_with('qnetd-node', port=5403, ssh_user='alice', algo='ffsplit', tie_breaker='lowest', tls='on', cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_no_configured(self, mock_qdevice_configured, mock_error):
+ mock_qdevice_configured.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_error.assert_called_once_with("No QDevice configuration in this cluster")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_not_confirmed(self, mock_qdevice_configured, mock_confirm):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.qdevice.QDevice.remove_certification_files_on_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_config')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_reload(self, mock_qdevice_configured, mock_confirm, mock_reachable, mock_evaluate,
+ mock_status, mock_invoke, mock_status_long, mock_update_votes, mock_remove_config, mock_remove_db,
+ mock_remove_files, mock_adjust_priority, mock_adjust_fence_delay, mock_service_is_active):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = True
+ mock_evaluate.return_value = qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_service_is_active.return_value = False
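+ # With the RELOAD policy, only a corosync reload is expected after the configuration change, as asserted below.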
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+ mock_reachable.assert_called_once_with()
+ mock_evaluate.assert_called_once_with(qdevice.QDEVICE_REMOVE)
+ mock_status.assert_has_calls([
+ mock.call("Disable corosync-qdevice.service"),
+ mock.call("Stopping corosync-qdevice.service")
+ ])
+ mock_invoke.assert_has_calls([
+ mock.call("crm cluster run 'systemctl disable corosync-qdevice'"),
+ mock.call("crm cluster run 'systemctl stop corosync-qdevice'"),
+ mock.call("crm cluster run 'crm corosync reload'")
+ ])
+ mock_status_long.assert_called_once_with("Removing QDevice configuration from cluster")
+ mock_update_votes.assert_called_once_with()
+ mock_remove_config.assert_called_once_with()
+ mock_remove_db.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_tls_on')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_start_qdevice_on_join_node(self, mock_status_long, mock_is_unicast, mock_add_nodelist,
+ mock_conf, mock_csync2_update, mock_invoke, mock_qdevice_tls,
+ mock_get_value, mock_qdevice, mock_start_service):
+ mock_is_unicast.return_value = False
+ mock_qdevice_tls.return_value = True
+ mock_conf.return_value = "corosync.conf"
+ mock_get_value.return_value = "10.10.10.123"
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+ mock_qdevice_inst.certificate_process_on_join = mock.Mock()
+
+ bootstrap.start_qdevice_on_join_node("node2")
+
+ mock_status_long.assert_called_once_with("Starting corosync-qdevice.service")
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_conf.assert_called_once_with()
+ mock_csync2_update.assert_called_once_with("corosync.conf")
+ mock_invoke.assert_called_once_with("crm corosync reload")
+ mock_qdevice_tls.assert_called_once_with()
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_qdevice.assert_called_once_with("10.10.10.123", cluster_node="node2")
+ mock_qdevice_inst.certificate_process_on_join.assert_called_once_with()
+ mock_start_service.assert_called_once_with("corosync-qdevice.service", enable=True)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.log.LoggerUtils.log_only_to_file')
+ def test_invoke(self, mock_log, mock_run):
+ mock_run.return_value = (0, "output", "error")
+ res = bootstrap.invoke("cmd --option")
+ self.assertEqual(res, (True, "output", "error"))
+ mock_log.assert_has_calls([
+ mock.call('invoke: cmd --option'),
+ mock.call('stdout: output'),
+ mock.call('stderr: error')
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_invokerc(self, mock_invoke):
+ mock_invoke.return_value = (True, None, None)
+ res = bootstrap.invokerc("cmd")
+ self.assertEqual(res, True)
+ mock_invoke.assert_called_once_with("cmd")
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('os.path.isfile')
+ def test_sync_files_to_disk(self, mock_isfile, mock_cluster_cmd):
+ bootstrap.FILES_TO_SYNC = ("file1", "file2")
+ mock_isfile.side_effect = [True, True]
+ bootstrap.sync_files_to_disk()
+ mock_isfile.assert_has_calls([mock.call("file1"), mock.call("file2")])
+ mock_cluster_cmd.assert_called_once_with("sync file1 file2")
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay_2node(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(True)
+ mock_run.assert_called_once_with("crm resource param res_1 set pcmk_delay_max {}s".format(constants.PCMK_DELAY_MAX))
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(False)
+ mock_run.assert_called_once_with("crm resource param res_1 delete pcmk_delay_max")
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout_sbd(self, mock_is_active, mock_sbd_timeout):
+ mock_is_active.return_value = True
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration = mock.Mock()
+ bootstrap.adjust_stonith_timeout()
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.bootstrap.get_stonith_timeout_generally_expected')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout(self, mock_is_active, mock_get_timeout, mock_set):
+ mock_is_active.return_value = False
+ mock_get_timeout.return_value = 30
+ bootstrap.adjust_stonith_timeout()
+ mock_set.assert_called_once_with("stonith-timeout", 30, conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults_2node(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(True)
+ mock_set.assert_called_once_with('priority', 1, property_type='rsc_defaults', conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(False)
+ mock_set.assert_called_once_with('priority', 0, property_type='rsc_defaults')
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_fence_agent(self, mock_run):
+ mock_run.return_value = None
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_pcmk_delay(self, mock_run, mock_set):
+ mock_run.return_value = "data"
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+ mock_set.assert_called_once_with("priority-fencing-delay", 0)
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties_no_service(self, mock_is_active):
+ mock_is_active.return_value = False
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.adjust_stonith_timeout')
+ @mock.patch('crmsh.bootstrap.adjust_pcmk_delay_max')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties(self, mock_is_active, mock_2node_qdevice, mock_adj_pcmk, mock_adj_stonith, mock_adj_priority, mock_adj_fence):
+ mock_is_active.return_value = True
+ mock_2node_qdevice.return_value = True
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+ mock_adj_pcmk.assert_called_once_with(True)
+ mock_adj_stonith.assert_called_once_with()
+ mock_adj_priority.assert_called_once_with(True)
+ mock_adj_fence.assert_called_once_with(True)
+
+ @mock.patch('crmsh.utils.cluster_copy_file')
+ def test_sync_file_skip_csync2(self, mock_copy):
+ bootstrap._context = mock.Mock(skip_csync2=True, node_list_in_cluster=["node1", "node2"])
+ bootstrap.sync_file("/file1")
+ mock_copy.assert_called_once_with("/file1", nodes=["node1", "node2"], output=False)
+
+ @mock.patch('crmsh.bootstrap.csync2_update')
+ def test_sync_file(self, mock_csync2_update):
+ bootstrap._context = mock.Mock(skip_csync2=False)
+ bootstrap.sync_file("/file1")
+ mock_csync2_update.assert_called_once_with("/file1")
+
+
+class TestValidation(unittest.TestCase):
+ """
+ Unit tests for class bootstrap.Validation
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.validate_inst = bootstrap.Validation("10.10.10.1")
+ self.validate_port_inst_in_use = bootstrap.Validation("4567", ["4568"])
+ self.validate_port_inst_out_of_range = bootstrap.Validation("456766")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.IP.is_mcast')
+ def test_is_mcast_addr(self, mock_mcast):
+ mock_mcast.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_mcast_addr()
+ self.assertEqual("10.10.10.1 is not multicast address", str(err.exception))
+ mock_mcast.assert_called_once_with("10.10.10.1")
+
+ def test_is_local_addr(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_local_addr(["20.20.20.1", "20.20.20.2"])
+ self.assertEqual("Address must be a local address (one of ['20.20.20.1', '20.20.20.2'])", str(err.exception))
+
+ def test_is_valid_port_in_use(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_in_use._is_valid_port()
+ self.assertEqual("Port 4567 is already in use by corosync. Leave a gap between multiple rings.", str(err.exception))
+
+ def test_is_valid_port_out_of_range(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_out_of_range._is_valid_port()
+ self.assertEqual("Valid port range should be 1025-65535", str(err.exception))
+
+ @mock.patch('crmsh.bootstrap.Validation._is_mcast_addr')
+ def test_valid_mcast_address(self, mock_mcast):
+ bootstrap.Validation.valid_mcast_address("10.10.10.1")
+ mock_mcast.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_ucast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"])
+ bootstrap.Validation.valid_ucast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_mcast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"],
+ local_network_list=["10.10.10.0"])
+ bootstrap.Validation.valid_mcast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3", "10.10.10.0"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_valid_port')
+ def test_valid_port(self, mock_port):
+ bootstrap.Validation.valid_port("10.10.10.1")
+ mock_port.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ def test_valid_admin_ip_in_use(self, mock_ipv6, mock_invoke):
+ mock_ipv6.return_value = False
+ mock_invoke.return_value = True
+
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst.valid_admin_ip("10.10.10.1")
+ self.assertEqual("Address already in use: 10.10.10.1", str(err.exception))
+
+ mock_ipv6.assert_called_once_with("10.10.10.1")
+ mock_invoke.assert_called_once_with("ping -c 1 10.10.10.1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_cluster_is_active(self, mock_context, mock_init, mock_active,
+ mock_error):
+ mock_context_inst = mock.Mock()
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Cluster is not active - can't execute removing action")
+
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_error.assert_not_called()
+ mock_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice_cluster_node(self, mock_context, mock_init, mock_active, mock_error):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node="node1")
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Either remove node or qdevice")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_cluster_node(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_status, mock_prompt):
+ mock_context_inst = mock.Mock(yes_to_all=False, cluster_node=None, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+ mock_prompt.return_value = None
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_status.assert_called_once_with('Remove This Node from Cluster:\n You will be asked for the IP address or name of an existing node,\n which will be removed from the cluster. This command must be\n executed from a different node in the cluster.\n')
+ mock_prompt.assert_called_once_with("IP address or hostname of cluster node (e.g.: 192.168.1.1)", ".+")
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_confirm(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = False
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_error.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self_need_force(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = True
+ mock_this_node.return_value = "node1"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Removing self requires --force")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.remove_self')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_this_node.return_value = "node1"
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_not_called()
+ mock_self.assert_called_once_with(True)
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node1')
+
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_not_in_cluster(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_list):
+ mock_context_inst = mock.Mock(cluster_node="node2", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, True]
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node3"]
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Specified node node2 is not configured in cluster! Unable to remove.")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node')
+ @mock.patch('crmsh.bootstrap.remove_node_from_cluster')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node,
+ mock_list, mock_remove, mock_fetch, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node2", qdevice_rm_flag=None, force=True)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, False]
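+ # corosync.service reports active while csync2.socket does not, exercising the removal path without csync2.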
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node2"]
+ mock_fetch.return_value = ["node1", "node2"]
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_error.assert_not_called()
+ mock_remove.assert_called_once_with('node2')
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node2')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ def test_remove_self_other_nodes(self, mock_this_node, mock_list, mock_run, mock_error):
+ mock_this_node.return_value = 'node1'
+ mock_list.return_value = ["node1", "node2"]
+ mock_run.return_value = (1, '', 'err')
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(cluster_node="node1", yes_to_all=True)
+ bootstrap.remove_self()
+
+ mock_list.assert_called_once_with(include_remote_nodes=False)
+ mock_run.assert_called_once_with("node2", "crm cluster remove -y -c node1")
+ mock_error.assert_called_once_with("Failed to remove this node from node2")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_rm_configuration_files(self, mock_run, mock_installed):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ mock_installed.return_value = True
+ bootstrap.rm_configuration_files()
+ mock_run.assert_has_calls([
+ mock.call('rm -f file1 file2', None),
+ mock.call('cp /usr/share/fillup-templates/sysconfig.sbd /etc/sysconfig/sbd', None)
+ ])
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip_host(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["node1", "node2"]
+ self.assertIsNone(bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_not_called()
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_get_iplist.return_value = ["10.10.10.1"]
+ self.assertEqual("10.10.10.1", bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_called_once_with('node1')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_stop_services(self, mock_active, mock_status, mock_stop):
+ mock_active.side_effect = [True, True, True, True]
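+ # All four services in SERVICES_STOP_LIST report active, so each one is stopped and disabled.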
+ bootstrap.stop_services(bootstrap.SERVICES_STOP_LIST)
+ mock_active.assert_has_calls([
+ mock.call("corosync-qdevice.service", remote_addr=None),
+ mock.call("corosync.service", remote_addr=None),
+ mock.call("hawk.service", remote_addr=None),
+ mock.call("csync2.socket", remote_addr=None)
+ ])
+ mock_status.assert_has_calls([
+ mock.call('Stopping the %s%s', 'corosync-qdevice.service', ''),
+ mock.call('Stopping the %s%s', 'corosync.service', ''),
+ mock.call('Stopping the %s%s', 'hawk.service', ''),
+ mock.call('Stopping the %s%s', 'csync2.socket', '')
+ ])
+ mock_stop.assert_has_calls([
+ mock.call("corosync-qdevice.service", disable=True, remote_addr=None),
+ mock.call("corosync.service", disable=True, remote_addr=None),
+ mock.call("hawk.service", disable=True, remote_addr=None),
+ mock.call("csync2.socket", disable=True, remote_addr=None)
+ ])
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_node_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_error.assert_called_once_with("Failed to remove node1.")
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_csync_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_invokerc, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = True
+ mock_invokerc.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invokerc.assert_has_calls([
+ mock.call("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ ])
+ mock_error.assert_called_once_with("Removing the node node1 from {} failed".format(bootstrap.CSYNC2_CFG))
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.bootstrap.decrease_expected_votes')
+ @mock.patch('crmsh.corosync.del_node')
+ @mock.patch('crmsh.corosync.get_values')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
+ mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_decrease, mock_csync2,
+ mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_call_delnode):
+ mock_get_ip.return_value = "10.10.10.1"
+ mock_call_delnode.return_value = True
+ mock_invoke.side_effect = [(True, None, None)]
+ mock_invokerc.return_value = True
+ mock_get_values.return_value = ["10.10.10.1"]
+ mock_is_active.return_value = False
+
+ bootstrap._context = mock.Mock(cluster_node="node1", rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_has_calls([
+ mock.call("Removing the node node1"),
+ mock.call("Propagating configuration changes across the remaining nodes")
+ ])
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invoke.assert_has_calls([
+ mock.call("corosync-cfgtool -R")
+ ])
+ mock_invokerc.assert_called_once_with("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ mock_error.assert_not_called()
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_del.assert_called_once_with("10.10.10.1")
+ mock_decrease.assert_called_once_with()
+ mock_csync2.assert_has_calls([
+ mock.call(bootstrap.CSYNC2_CFG),
+ mock.call("/etc/corosync/corosync.conf")
+ ])
diff --git a/test/unittests/test_bugs.py b/test/unittests/test_bugs.py
new file mode 100644
index 0000000..725b020
--- /dev/null
+++ b/test/unittests/test_bugs.py
@@ -0,0 +1,893 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import cibconfig
+from lxml import etree
+from crmsh import xmlutil
+
+factory = cibconfig.cib_factory
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
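+ # snapshot the factory state; teardown_function pops it so each test starts from a clean CIB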
+ factory._push_state()
+
+
+def teardown_function():
+ factory._pop_state()
+
+
+def test_bug41660_1():
+ xml = """<primitive id="bug41660" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="bug41660-meta"> \
+ <nvpair id="bug41660-meta-target-role" name="target-role" value="Stopped"/> \
+ </meta_attributes> \
+ </primitive>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive bug41660 ocf:pacemaker:Dummy meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("bug41660", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_2():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta">
+ <nvpair id="libvirtd-interleave" name="interleave" value="true"/>
+ <nvpair id="libvirtd-ordered" name="ordered" value="true"/>
+ <nvpair id="libvirtd-clone-meta-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ #data = obj.repr_cli(format_mode=-1)
+ #print data
+ #exp = 'clone libvirtd-clone libvirtd meta interleave=true ordered=true target-role=Stopped'
+ #assert data == exp
+ #assert obj.cli_use_validate()
+
+ print(etree.tostring(obj.node))
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ print("PRE", etree.tostring(obj.node))
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ print("POST", etree.tostring(obj.node))
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_3():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta_attributes">
+ <nvpair id="libvirtd-clone-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone libvirtd-clone libvirtd meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_comments():
+ xml = """<cib epoch="25" num_updates="1" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Thu Mar 6 15:53:49 2014" update-origin="beta1" update-client="cibadmin" update-user="root" crm_feature_set="3.0.8" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.11-3.3-3ca8c3b"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <!--# COMMENT TEXT 1 -->
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="beta1" id="1">
+ <!--# COMMENT TEXT 2 -->
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <!--# COMMENT TEXT 3 -->
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="beta1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources/>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-shutdown" name="shutdown" value="0"/>
+ <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>"""
+ elems = etree.fromstring(xml)
+ xmlutil.sanitize_cib(elems)
+ assert xmlutil.xml_tostring(elems).count("COMMENT TEXT") == 3
+
+
+def test_eq1():
+ xml1 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ xml2 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ e1 = etree.fromstring(xml1)
+ e2 = etree.fromstring(xml2)
+ assert xmlutil.xml_equals(e1, e2, show=True)
+
+
+def test_pcs_interop_1():
+ """
+ pcs<>crmsh interop bug
+ """
+
+ xml = """<clone id="dummies">
+ <meta_attributes id="dummies-meta">
+ <nvpair name="globally-unique" value="false" id="dummies-meta-globally-unique"/>
+ </meta_attributes>
+ <meta_attributes id="dummies-meta_attributes">
+ <nvpair id="dummies-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="dummy-1" class="ocf" provider="heartbeat" type="Dummy"/>
+ </clone>"""
+ elem = etree.fromstring(xml)
+ from crmsh.ui_resource import set_deep_meta_attr_node
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+ print("BEFORE:", etree.tostring(elem))
+
+ set_deep_meta_attr_node(elem, 'target-role', 'Stopped')
+
+ print("AFTER:", etree.tostring(elem))
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+
+def test_bnc878128():
+ """
+ L3: "crm configure show" displays XML information instead of typical crm output.
+ """
+ xml = """<rsc_location id="cli-prefer-dummy-resource" rsc="dummy-resource"
+role="Started">
+ <rule id="cli-prefer-rule-dummy-resource" score="INFINITY">
+ <expression id="cli-prefer-expr-dummy-resource" attribute="#uname"
+operation="eq" value="x64-4"/>
+ <date_expression id="cli-prefer-lifetime-end-dummy-resource" operation="lt"
+end="2014-05-17 17:56:11Z"/>
+ </rule>
+</rsc_location>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'location cli-prefer-dummy-resource dummy-resource role=Started rule #uname eq x64-4 and date lt "2014-05-17 17:56:11Z"'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_order_without_score_kind():
+ """
+ Spec says order doesn't require score or kind to be set
+ """
+ xml = '<rsc_order first="a" first-action="promote" id="order-a-b" then="b" then-action="start"/>'
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'order order-a-b a:promote b:start'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_bnc878112():
+ """
+ crm configure group can hijack a cloned primitive (and then crash)
+ """
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('group', 'g1', 'p1')
+ assert obj2 is True
+ obj3 = factory.create_object('group', 'g2', 'p1')
+ print(obj3)
+ assert obj3 is False
+
+
+def test_copy_nvpairs():
+ from crmsh.cibconfig import copy_nvpairs
+
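+ # copying an nvpair with an existing name should update its value in place, not append a duplicate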
+ to = etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ ''')
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="false"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['false'] == to.xpath('./nvpair/@value')
+
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['true'] == to.xpath('./nvpair/@value')
+
+
+def test_pengine_test():
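+ # two rule-based instance_attributes sets should render as two separate "params rule ..." clauses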
+ xml = '''<primitive class="ocf" id="rsc1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="rsc1-instance_attributes-1">
+ <nvpair id="rsc1-instance_attributes-1-state" name="state" value="/var/run/Dummy-rsc1-clusterA"/>
+ <rule id="rsc1-instance_attributes-1-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-1-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterA"/>
+ </rule>
+ </instance_attributes>
+ <instance_attributes id="rsc1-instance_attributes-2">
+ <nvpair id="rsc1-instance_attributes-2-state" name="state" value="/var/run/Dummy-rsc1-clusterB"/>
+ <rule id="rsc1-instance_attributes-2-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-2-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterB"/>
+ </rule>
+ </instance_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc1 ocf:pacemaker:Dummy params rule 0: #cluster-name eq clusterA state="/var/run/Dummy-rsc1-clusterA" params rule 0: #cluster-name eq clusterB state="/var/run/Dummy-rsc1-clusterB" op monitor interval=10'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_tagset():
+ xml = '''<primitive class="ocf" id="%s" provider="pacemaker" type="Dummy"/>'''
+ tag = '''<tag id="t0"><obj_ref id="r1"/><obj_ref id="r2"/></tag>'''
+ factory.create_from_node(etree.fromstring(xml % ('r1')))
+ factory.create_from_node(etree.fromstring(xml % ('r2')))
+ factory.create_from_node(etree.fromstring(xml % ('r3')))
+ factory.create_from_node(etree.fromstring(tag))
+ elems = factory.get_elems_on_tag("tag:t0")
+ assert set(x.obj_id for x in elems) == set(['r1', 'r2'])
+
+
+def test_op_role():
+ xml = '''<primitive class="ocf" id="rsc2" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor" role="Stopped"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc2 ocf:pacemaker:Dummy op monitor interval=10 role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nvpair_no_value():
+ xml = '''<primitive class="ocf" id="rsc3" provider="heartbeat" type="Dummy">
+ <instance_attributes id="rsc3-instance_attributes-1">
+ <nvpair id="rsc3-instance_attributes-1-verbose" name="verbose"/>
+ <nvpair id="rsc3-instance_attributes-1-verbase" name="verbase" value=""/>
+ <nvpair id="rsc3-instance_attributes-1-verbese" name="verbese" value=" "/>
+ </instance_attributes>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc3 Dummy params verbose verbase="" verbese=" "'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_delete_ticket():
+ xml0 = '<primitive id="daa0" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml1 = '<primitive id="daa1" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml2 = '''<rsc_ticket id="taa0" ticket="taaA">
+ <resource_set id="taa0-0">
+ <resource_ref id="daa0"/>
+ <resource_ref id="daa1"/>
+ </resource_set>
+ </rsc_ticket>'''
+ for x in (xml0, xml1, xml2):
+ data = etree.fromstring(x)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+
+ factory.delete('daa0')
+ assert factory.find_object('daa0') is None
+ assert factory.find_object('taa0') is not None
+
+
+def test_quotes():
+ """
+ Parsing escaped quotes
+ """
+ xml = '''<primitive class="ocf" id="q1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="q1-instance_attributes-1">
+ <nvpair id="q1-instance_attributes-1-state" name="state" value="foo&quot;foo&quot;"/>
+ </instance_attributes>
+ </primitive>
+ '''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive q1 ocf:pacemaker:Dummy params state="foo\\"foo\\""'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs():
+ """
+ bug with parsing node attrs
+ """
+ xml = '''<node id="1" uname="dell71"> \
+ <instance_attributes id="dell71-instance_attributes"> \
+ <nvpair name="staging-0-0-placement" value="true" id="dell71-instance_attributes-staging-0-0-placement"/> \
+ <nvpair name="meta-0-0-placement" value="true" id="dell71-instance_attributes-meta-0-0-placement"/> \
+ </instance_attributes> \
+ <instance_attributes id="nodes-1"> \
+ <nvpair id="nodes-1-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>'''
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node 1: dell71 attributes staging-0-0-placement=true meta-0-0-placement=true attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs2():
+ xml = """<node id="h04" uname="h04"> \
+ <utilization id="h04-utilization"> \
+ <nvpair id="h04-utilization-utl_ram" name="utl_ram" value="1200"/> \
+ <nvpair id="h04-utilization-utl_cpu" name="utl_cpu" value="200"/> \
+ </utilization> \
+ <instance_attributes id="nodes-h04"> \
+ <nvpair id="nodes-h04-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node h04 utilization utl_ram=1200 utl_cpu=200 attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_group_constraint_location():
+ """
+ configuring a location constraint on a grouped resource is OK
+ """
+ factory.create_object('node', 'node1')
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('location', 'loc-p1', 'p1', 'inf:', 'node1')
+ c = factory.find_object('loc-p1')
+ assert c and c.check_sanity() == 0
+
+
+def test_group_constraint_colocation():
+ """
+ configuring a colocation constraint on a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2', 'inf:', 'p1', 'p2')
+ c = factory.find_object('coloc-p1-p2')
+ assert c and c.check_sanity() > 0
+
+
+def test_group_constraint_colocation_rscset():
+ """
+ configuring a colocation constraint with a resource set that references a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_clone_constraint_colocation_rscset():
+ """
+ configuring a colocation constraint with a resource set that references a cloned resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('clone', 'c1', 'p1')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_existing_node_resource():
+ factory.create_object('primitive', 'ha-one', 'Dummy')
+
+ n = factory.find_node('ha-one')
+ assert factory.test_element(n)
+
+ r = factory.find_resource('ha-one')
+ assert factory.test_element(r)
+
+ assert n != r
+
+ assert factory.check_structure()
+ factory.cli_use_validate_all()
+
+ ok, s = factory.mkobj_set('ha-one')
+ assert ok
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_existing_node_resource_2(mock_incr, mock_line_num):
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ from crmsh import clidisplay
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text += "\nprimitive ha-one Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ text2 = obj.repr()
+
+ assert sorted(text.split('\n')) == sorted(text2.split('\n'))
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_1(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
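+ # save progressively modified configurations, then restore original_cib and verify the round-trip is exact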
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node node1
+primitive p0 ocf:pacemaker:Dummy
+primitive p1 ocf:pacemaker:Dummy
+primitive p2 ocf:heartbeat:Delay \
+ params startdelay=2 mondelay=2 stopdelay=2
+primitive p3 ocf:pacemaker:Dummy
+primitive st stonith:null params hostlist=node1
+clone c1 p1
+ms m1 p2
+op_defaults timeout=60s
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\nprimitive p2 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\ngroup g1 p1 p2"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text.replace("group g1 p1 p2", "group g1 p1 p3")
+ text = text + "\nprimitive p3 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ print(obj.repr().strip())
+ assert obj.repr().strip() == "group g1 p1 p3"
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_3(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""node node1
+primitive node1 Dummy params fake=something
+ """)
+ assert ok
+
+ print("** baseline")
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ print(obj.repr())
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""primitive node1 Dummy params fake=something-else
+ """, remove=False, method='update')
+ assert ok
+
+ print("** end")
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib, remove=True, method='replace')
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_2(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node 168633610: webui
+node 168633611: node1
+rsc_template web-server apache \
+ params port=8000 \
+ op monitor interval=10s
+primitive d0 Dummy \
+ meta target-role=Started
+primitive d1 Dummy
+primitive d2 Dummy
+# Never use this STONITH agent in production!
+primitive development-stonith stonith:null \
+ params hostlist="webui node1 node2 node3"
+primitive proxy systemd:haproxy \
+ op monitor interval=10s
+primitive proxy-vip IPaddr2 \
+ params ip=10.13.37.20
+primitive srv1 @web-server
+primitive srv2 @web-server
+primitive vip1 IPaddr2 \
+ params ip=10.13.37.21 \
+ op monitor interval=20s
+primitive vip2 IPaddr2 \
+ params ip=10.13.37.22 \
+ op monitor interval=20s
+primitive virtual-ip IPaddr2 \
+ params ip=10.13.37.77 lvs_support=false \
+ op start timeout=20 interval=0 \
+ op stop timeout=20 interval=0 \
+ op monitor interval=10 timeout=20
+primitive yet-another-virtual-ip IPaddr2 \
+ params ip=10.13.37.72 cidr_netmask=24 \
+ op start interval=0 timeout=20 \
+ op stop interval=0 timeout=20 \
+ op monitor interval=10 timeout=20 \
+ meta target-role=Started
+group dovip d0 virtual-ip \
+ meta target-role=Stopped
+group g-proxy proxy-vip proxy
+group g-serv1 vip1 srv1
+group g-serv2 vip2 srv2
+clone d2-clone d2 \
+ meta target-role=Started
+tag dummytag d0 d1 d1-on-node1 d2 d2-clone
+# Never put the two web servers on the same node
+colocation co-serv -inf: g-serv1 g-serv2
+location d1-on-node1 d1 inf: node1
+# Never put any web server or haproxy on webui
+location l-avoid-webui { g-proxy g-serv1 g-serv2 } -inf: webui
+# Prefer to spread groups across nodes
+location l-proxy g-proxy 200: node1
+location l-serv1 g-serv1 200: node2
+location l-serv2 g-serv2 200: node3
+property cib-bootstrap-options: \
+ have-watchdog=false \
+ dc-version="1.1.13+git20150917.20c2178-224.2-1.1.13+git20150917.20c2178" \
+ cluster-infrastructure=corosync \
+ cluster-name=hacluster \
+ stonith-enabled=true \
+ no-quorum-policy=ignore
+rsc_defaults rsc-options: \
+ resource-stickiness=1 \
+ migration-threshold=3
+op_defaults op-options: \
+ timeout=600 \
+ record-pending=true
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+def test_bug_110():
+ """
+ configuring attribute-based fencing-topology
+ """
+ factory.create_object(*"primitive stonith-libvirt stonith:null".split())
+ factory.create_object(*"primitive fence-nova stonith:null".split())
+ cmd = "fencing_topology attr:OpenStack-role=compute stonith-libvirt,fence-nova".split()
+ ok = factory.create_object(*cmd)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ for o in obj.obj_set:
+ if o.node.tag == 'fencing-topology':
+ assert o.check_sanity() == 0
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_reordering_resource_sets(mock_incr, mock_line_num):
+ """
+ Can we reorder resource sets?
+ """
+ from crmsh import clidisplay
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('primitive', 'p2', 'Dummy')
+ assert obj2 is True
+ obj3 = factory.create_object('primitive', 'p3', 'Dummy')
+ assert obj3 is True
+ obj4 = factory.create_object('primitive', 'p4', 'Dummy')
+ assert obj4 is True
+ o1 = factory.create_object('order', 'o1', 'p1', 'p2', 'p3', 'p4')
+ assert o1 is True
+
+ obj = cibconfig.mkset_obj('o1')
+ assert obj is not None
+ rc = obj.save('order o1 p4 p3 p2 p1')
+ assert rc == True
+
+ obj2 = cibconfig.mkset_obj('o1')
+ with clidisplay.nopretty():
+ assert "order o1 p4 p3 p2 p1" == obj2.repr().strip()
+
+
+def test_bug959895():
+ """
+ Allow importing XML with cloned groups
+ """
+ xml = """<clone id="c-bug959895">
+ <group id="g-bug959895">
+ <primitive id="p-bug959895-a" class="ocf" provider="pacemaker" type="Dummy" />
+ <primitive id="p-bug959895-b" class="ocf" provider="pacemaker" type="Dummy" />
+ </group>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone c-bug959895 g-bug959895'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("c-bug959895", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_node_util_attr():
+ """
+ Handle a node with utilization before attributes correctly
+ """
+ xml = """<node id="aberfeldy" uname="aberfeldy">
+ <utilization id="nodes-aberfeldy-utilization">
+ <nvpair id="nodes-aberfeldy-utilization-cpu" name="cpu" value="2"/>
+ <nvpair id="nodes-aberfeldy-utilization-memory" name="memory" value="500"/>
+ </utilization>
+ <instance_attributes id="nodes-aberfeldy">
+ <nvpair id="nodes-aberfeldy-standby" name="standby" value="on"/>
+ </instance_attributes>
+</node>"""
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'node aberfeldy utilization cpu=2 memory=500 attributes standby=on'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_dup_create_same_name():
+ """
+ Creating two objects with the same name
+ """
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert ok
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert not ok
+
+
+def test_dup_create():
+ """
+ Creating property sets with unknown properties
+ """
+ ok = factory.create_object(*"property hana_test1: hana_attribute_1=5 hana_attribute_2=mohican".split())
+ assert ok
+ ok = factory.create_object(*"property hana_test2: hana_attribute_1=5s a-b-c-d=e-f-g".split())
+ assert ok
diff --git a/test/unittests/test_cib.py b/test/unittests/test_cib.py
new file mode 100644
index 0000000..def915f
--- /dev/null
+++ b/test/unittests/test_cib.py
@@ -0,0 +1,32 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+from crmsh import cibconfig
+from lxml import etree
+import copy
+
+factory = cibconfig.cib_factory
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
+def test_cib_schema_change():
+ "Changing the validate-with CIB attribute"
+ copy_of_cib = copy.copy(factory.cib_orig)
+ print(etree.tostring(copy_of_cib, pretty_print=True))
+ tmp_cib_objects = factory.cib_objects
+ factory.cib_objects = []
+ factory.change_schema("pacemaker-1.1")
+ factory.cib_objects = tmp_cib_objects
+ factory._copy_cib_attributes(copy_of_cib, factory.cib_orig)
+ assert factory.cib_attrs["validate-with"] == "pacemaker-1.1"
+ assert factory.cib_elem.get("validate-with") == "pacemaker-1.1"
diff --git a/test/unittests/test_cliformat.py b/test/unittests/test_cliformat.py
new file mode 100644
index 0000000..2eb25b5
--- /dev/null
+++ b/test/unittests/test_cliformat.py
@@ -0,0 +1,324 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for cliformat.py
+
+from crmsh import cibconfig
+from crmsh import parse
+from lxml import etree
+from .test_parse import MockValidation
+
+factory = cibconfig.cib_factory
+
+
+def assert_is_not_none(thing):
+ assert thing is not None, "Expected non-None value"
+
+
+def roundtrip(cli, debug=False, expected=None, format_mode=-1, strip_color=False):
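+ # parse the CLI text into XML, build the CIB object, then render it back to CLI
+ # and assert the output matches the input (or the explicit 'expected' string)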
+ parse.validator = MockValidation()
+ node, _, _ = cibconfig.parse_cli_to_xml(cli)
+ assert_is_not_none(node)
+ obj = factory.find_object(node.get("id"))
+ if obj:
+ factory.delete(node.get("id"))
+ obj = factory.create_from_node(node)
+ assert_is_not_none(obj)
+ obj.nocli = True
+ xml = obj.repr_cli(format_mode=format_mode)
+ print(xml)
+ obj.nocli = False
+ s = obj.repr_cli(format_mode=format_mode)
+ if strip_color:
+ import re
+ s = re.sub(r"\$\{[^}]+\}", "", s)
+ if (s != cli) or debug:
+ print("GOT:", s)
+ print("EXP:", cli)
+ assert obj.cli_use_validate()
+ if expected is not None:
+ assert expected == s
+ else:
+ assert cli == s
+ assert not debug
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ "tear down test fixtures"
+
+
+def test_rscset():
+ roundtrip('colocation foo inf: a b')
+ roundtrip('order order_2 Mandatory: [ A B ] C')
+ roundtrip('rsc_template public_vm Xen')
+
+
+''' Seems to rely on the cluster environment; should be covered by a functional test
+def test_normalize():
+ """
+ Test automatic normalization of parameter names:
+ "shutdown_timeout" is a parameter name, but
+ "shutdown-timeout" is not.
+ """
+ roundtrip('primitive vm1 Xen params shutdown-timeout=0',
+ expected='primitive vm1 Xen params shutdown_timeout=0')
+'''
+
+
+def test_group():
+ factory.create_from_cli('primitive p1 Dummy')
+ roundtrip('group g1 p1 params target-role=Stopped')
+
+
+def test_bnc863736():
+ roundtrip('order order_3 Mandatory: [ A B ] C symmetrical=true')
+
+
+def test_sequential():
+ roundtrip('colocation rsc_colocation-master inf: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]')
+
+
+def test_broken_colo():
+ xml = """<rsc_colocation id="colo-2" score="INFINITY">
+ <resource_set id="colo-2-0" require-all="false">
+ <resource_ref id="vip1"/>
+ <resource_ref id="vip2"/>
+ </resource_set>
+ <resource_set id="colo-2-1" require-all="false" role="Master">
+ <resource_ref id="apache"/>
+ </resource_set>
+</rsc_colocation>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ assert 'colocation colo-2 inf: [ vip1 vip2 sequential=true ] [ apache:Master sequential=true ]' == data
+ assert obj.cli_use_validate()
+
+
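+ # stub out factory.commit so set_deep_meta_attr only mutates the in-memory CIB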
+def test_comment():
+ roundtrip("# comment 1\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_comment2():
+ roundtrip("# comment 1\n# comment 2\n# comment 3\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_nvpair_ref1():
+ factory.create_from_cli("primitive dummy-0 Dummy params $fiz:buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @fiz:boz')
+
+
+def test_idresolve():
+ factory.create_from_cli("primitive dummy-5 Dummy params buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @dummy-5-instance_attributes-buz')
+
+
+def test_ordering():
+ xml = """<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy-monitor-60"/> \
+ </operations> \
+ <meta_attributes id="dummy-meta_attributes"> \
+ <nvpair id="dummy-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy ocf:pacemaker:Dummy op start timeout=60 interval=0 op stop timeout=60 interval=0 op monitor interval=60 timeout=30 meta target-role=Stopped'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_ordering2():
+ xml = """<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="dummy2-meta_attributes"> \
+ <nvpair id="dummy2-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy2-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy2-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy2-monitor-60"/> \
+ </operations> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy2 ocf:pacemaker:Dummy meta target-role=Stopped ' \
+ 'op start timeout=60 interval=0 op stop timeout=60 interval=0 ' \
+ 'op monitor interval=60 timeout=30'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing():
+ xml = """<fencing-topology>
+ <fencing-level devices="st1" id="fencing" index="1"
+target="ha-three"></fencing-level>
+ <fencing-level devices="st1" id="fencing-0" index="1"
+target="ha-two"></fencing-level>
+ <fencing-level devices="st1" id="fencing-1" index="1"
+target="ha-one"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology st1'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing2():
+ xml = """<fencing-topology>
+ <fencing-level devices="apple" id="fencing" index="1"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="2"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="1"
+target-pattern="red.*"></fencing-level>
+ <fencing-level devices="apple" id="fencing" index="2"
+target-pattern="red.*"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology pattern:green.* apple pear pattern:red.* pear apple'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_master():
+ xml = """<master id="ms-1">
+ <crmsh-ref id="dummy3" />
+ </master>
+ """
+ data = etree.fromstring(xml)
+ factory.create_from_cli("primitive dummy3 ocf:pacemaker:Dummy")
+ data, _, _ = cibconfig.postprocess_cli(data)
+ print("after postprocess:", etree.tostring(data))
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ assert obj.cli_use_validate()
+
+
+def test_param_rules():
+ roundtrip('primitive foo Dummy ' +
+ 'params rule #uname eq wizbang laser=yes ' +
+ 'params rule #uname eq gandalf staff=yes')
+
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: rule #uname eq node1 interface=eth1 ' +
+ 'params 2: rule #uname eq node2 interface=eth2 port=8888 ' +
+ 'params 1: interface=eth0 port=9999')
+
+
+def test_operation_rules():
+ roundtrip('primitive test Dummy ' +
+ 'op start interval=0 '
+ 'op_params 2: rule #uname eq node1 fake=fake ' +
+ 'op_params 1: fake=real ' +
+ 'op_meta 2: rule #ra-version version:gt 1.0 timeout=120s ' +
+ 'op_meta 1: timeout=60s')
+
+
+def test_multiple_attrsets():
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: interface=eth1 ' +
+ 'params 2: port=8888')
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'meta 3: interface=eth1 ' +
+ 'meta 2: port=8888')
+
+
+def test_new_acls():
+ roundtrip('role fum description=test read description=test2 xpath:"*[@name=karl]"')
+
+
+def test_acls_reftype():
+ roundtrip('role boo deny ref:d0 type:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_acls_oldsyntax():
+ roundtrip('role boo deny ref:d0 tag:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_rules():
+ roundtrip('primitive p1 Dummy params ' +
+ 'rule $role=Started date in start=2009-05-26 end=2010-05-26 ' +
+ 'or date gt 2014-01-01 state=2')
+
+
+def test_new_role():
+ roundtrip('role silly-role-2 read xpath:"//nodes//attributes" ' +
+ 'deny type:nvpair deny ref:d0 deny type:nvpair')
+
+
+def test_topology_1114():
+ roundtrip('fencing_topology attr:rack=1 node1,node2')
+
+
+def test_topology_1114_pattern():
+ roundtrip('fencing_topology pattern:.* network disk')
+
+
+def test_locrule():
+ roundtrip('location loc-testfs-with-eth1 testfs rule ethmonitor-eth1 eq 1')
+
+
+def test_is_value_sane():
+ roundtrip('''primitive p1 Dummy params state="bo'o"''')
+
+
+def test_is_value_sane_2():
+ roundtrip('primitive p1 Dummy params state="bo\\"o"')
+
+
+def test_alerts_1():
+ roundtrip('alert alert1 "/tmp/foo.sh" to "/tmp/bar.log"')
+
+
+def test_alerts_2():
+ roundtrip('alert alert2 "/tmp/foo.sh" attributes foo=bar to "/tmp/bar.log"')
+
+
+def test_alerts_3():
+ roundtrip('alert alert3 "a path here" meta baby to "/tmp/bar.log"')
+
+
+def test_alerts_4():
+ roundtrip('alert alert4 "/also/a/path"')
+
+
+def test_alerts_5():
+ roundtrip('alert alert5 "/a/path" to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_6():
+ roundtrip('alert alert6 "/a/path" select fencing attributes { standby } to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_7():
+ roundtrip('alert alert7 "/a/path" select fencing attributes foo=bar to { "/another/path" } meta timeout=30s')
diff --git a/test/unittests/test_corosync.py b/test/unittests/test_corosync.py
new file mode 100644
index 0000000..2443f36
--- /dev/null
+++ b/test/unittests/test_corosync.py
@@ -0,0 +1,488 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for corosync.py
+
+from builtins import str
+from builtins import object
+import os
+import unittest
+import pytest
+from unittest import mock
+from crmsh import corosync
+from crmsh.corosync import Parser, make_section, make_value
+
+
+F1 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.1')).read()
+F2 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.2')).read()
+F3 = open(os.path.join(os.path.dirname(__file__), 'bug-862577_corosync.conf')).read()
+F4 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.3')).read()
+
+
+def _valid(parser):
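+ # walk the token stream: only comment/begin/end/value tokens are legal, and begin/end must balance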
+ depth = 0
+ for t in parser._tokens:
+ if t.token not in (corosync._tCOMMENT,
+ corosync._tBEGIN,
+ corosync._tEND,
+ corosync._tVALUE):
+ raise AssertionError("illegal token " + str(t))
+ if t.token == corosync._tBEGIN:
+ depth += 1
+ if t.token == corosync._tEND:
+ depth -= 1
+ if depth != 0:
+ raise AssertionError("Unbalanced sections")
+
+
+def _print(parser):
+ print(parser.to_string())
+
+
+def test_query_status_exception():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("test")
+ assert str(err.value) == "Wrong type \"test\" to query status"
+
+
+@mock.patch('crmsh.corosync.query_ring_status')
+def test_query_status(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status_exception(mock_configured):
+ mock_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qdevice_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_configured.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.print_cluster_nodes')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status(mock_configured, mock_run, mock_print):
+ mock_configured.return_value = True
+ corosync.query_qdevice_status()
+ mock_run.assert_called_once_with("corosync-qdevice-tool -sv")
+ mock_print.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_ring_status")
+def test_query_status_ring(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_quorum_status")
+def test_query_status_quorum(mock_quorum_status):
+ corosync.query_status("quorum")
+ mock_quorum_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_qnetd_status")
+def test_query_status_qnetd(mock_qnetd_status):
+ corosync.query_status("qnetd")
+ mock_qnetd_status.assert_called_once_with()
+
+
+def test_query_status_except():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("xxx")
+ assert str(err.value) == "Wrong type \"xxx\" to query status"
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status_except(mock_run):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_ring_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status(mock_run):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_ring_status()
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_except(mock_run, mock_print_nodes):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_quorum_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status(mock_run, mock_print_nodes):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_no_quorum(mock_run, mock_print_nodes):
+ mock_run.return_value = (2, "no quorum", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_qdevice(mock_qdevice_configured):
+ mock_qdevice_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_cluster_name(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.return_value = None
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "cluster_name not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_host(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", None]
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "host for qnetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy_id_failed(mock_qdevice_configured,
+ mock_get_value, mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_parallax_call.side_effect = ValueError("Failed on 10.10.10.123: foo")
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert err.value.args[0] == "Failed on 10.10.10.123: foo"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy(mock_qdevice_configured, mock_get_value,
+ mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes,
+ mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ mock_parallax_call.return_value = [("node1", (0, "data", None)), ]
+
+ corosync.query_qnetd_status()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+ mock_parallax_call.assert_called_once_with(["10.10.10.123"], "corosync-qnetd-tool -lv -c hacluster")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.get_nodeinfo_from_cmaptool')
+@mock.patch('crmsh.corosync.add_node_ucast')
+def test_add_nodelist_from_cmaptool(mock_add_ucast, mock_nodeinfo):
+ mock_nodeinfo.return_value = {'1': ['10.10.10.1', '20.20.20.1'],'2': ['10.10.10.2', '20.20.20.2']}
+
+ corosync.add_nodelist_from_cmaptool()
+
+ mock_nodeinfo.assert_called_once_with()
+ mock_add_ucast.assert_has_calls([
+ mock.call(['10.10.10.1', '20.20.20.1'], '1'),
+ mock.call(['10.10.10.2', '20.20.20.2'], '2')
+ ])
+
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_unicast(mock_get_value):
+ mock_get_value.return_value = "udpu"
+ assert corosync.is_unicast() is True
+ mock_get_value.assert_called_once_with("totem.transport")
+
+
+@mock.patch('crmsh.corosync.get_corosync_value_dict')
+def test_token_and_consensus_timeout(mock_get_dict):
+ mock_get_dict.return_value = {"token": 10, "consensus": 12}
+ assert corosync.token_and_consensus_timeout() == 22
+
+
+@mock.patch('crmsh.corosync.get_corosync_value')
+def test_get_corosync_value_dict(mock_get_value):
+ mock_get_value.side_effect = ["10000", None]
+ res = corosync.get_corosync_value_dict()
+ assert res == {"token": 10, "consensus": 12}
+
+
+@mock.patch('crmsh.corosync.get_value')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value_raise(mock_run, mock_get_value):
+ mock_run.side_effect = ValueError
+ mock_get_value.return_value = None
+ assert corosync.get_corosync_value("xxx") is None
+ mock_run.assert_called_once_with("corosync-cmapctl xxx")
+ mock_get_value.assert_called_once_with("xxx")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value(mock_run):
+ mock_run.return_value = "totem.token = 10000"
+ assert corosync.get_corosync_value("totem.token") == "10000"
+ mock_run.assert_called_once_with("corosync-cmapctl totem.token")
+
+
+class TestCorosyncParser(unittest.TestCase):
+ def test_parse(self):
+ p = Parser(F1)
+ _valid(p)
+ self.assertEqual(p.get('logging.logfile'), '/var/log/cluster/corosync.log')
+ self.assertEqual(p.get('totem.interface.ttl'), '1')
+ p.set('totem.interface.ttl', '2')
+ _valid(p)
+ self.assertEqual(p.get('totem.interface.ttl'), '2')
+ p.remove('quorum')
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 0)
+ p.add('', make_section('quorum', []))
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 1)
+ p.set('quorum.votequorum', '2')
+ _valid(p)
+ self.assertEqual(p.get('quorum.votequorum'), '2')
+ p.set('bananas', '5')
+ _valid(p)
+ self.assertEqual(p.get('bananas'), '5')
+
+ def test_udpu(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', '10.10.10.10') +
+ make_value('nodelist.node.nodeid', str(corosync.get_free_nodeid(p)))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 6)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1', '2', '3'])
+
+ def test_add_node_no_nodelist(self):
+ "test checks that if there is no nodelist, no node is added"
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F1)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
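+ # F1 has no nodelist section, so p.add() has nowhere to attach the node and the count must stay unchanged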
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_no_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
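+ # "10.10.10.2" is not among the already-configured ring addresses ("10.10.10.1" here), so no IPAlreadyConfiguredError is expected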
+ corosync.find_configured_ip(["10.10.10.2"])
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ mock_search.assert_called_once_with("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
+ with self.assertRaises(corosync.IPAlreadyConfiguredError) as err:
+ corosync.find_configured_ip(["10.10.10.2"])
+ self.assertEqual("IP 10.10.10.2 was already configured", str(err.exception))
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ # re.search is patched globally here, so other callers may hit the mock as well; assert_called_once_with is unreliable, so only check for the expected call
+ mock_search.assert_has_calls([mock.call("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")])
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.get_values")
+ @mock.patch("crmsh.corosync.make_value")
+ @mock.patch("crmsh.corosync.get_free_nodeid")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.utils.read_from_file")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.corosync.find_configured_ip")
+ def test_add_node_ucast(self, mock_find_ip, mock_conf, mock_read_file, mock_parser,
+ mock_free_id, mock_make_value, mock_get_values, mock_make_section, mock_str2file):
+ mock_parser_inst = mock.Mock()
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_parser.return_value = mock_parser_inst
+ mock_free_id.return_value = 2
+ mock_make_value.side_effect = [["value1"], ["value2"]]
+ mock_get_values.return_value = []
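+ # no ring0_addr values exist yet, so add_node_ucast is expected to create the nodelist section before adding the node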
+ mock_make_section.side_effect = ["section1", "section2"]
+ mock_parser_inst.count.return_value = 2
+ mock_parser_inst.get.return_value = "net"
+ mock_parser_inst.to_string.return_value = "string data"
+
+ corosync.add_node_ucast(['10.10.10.1'])
+
+ mock_find_ip.assert_called_once_with(['10.10.10.1'])
+ mock_parser.assert_called_once_with("data")
+ mock_free_id.assert_called_once_with(mock_parser_inst)
+ mock_make_value.assert_has_calls([
+ mock.call('nodelist.node.ring0_addr', '10.10.10.1'),
+ mock.call('nodelist.node.nodeid', '2')
+ ])
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_make_section.assert_has_calls([
+ mock.call('nodelist', []),
+ mock.call('nodelist.node', ["value1", "value2"])
+ ])
+ mock_parser_inst.add.assert_has_calls([
+ mock.call('', 'section1'),
+ mock.call('nodelist', 'section2')
+ ])
+ mock_parser_inst.count.assert_called_once_with("nodelist.node")
+ mock_parser_inst.set.assert_has_calls([
+ mock.call('quorum.two_node', '1'),
+ mock.call('quorum.two_node', '0')
+ ])
+ mock_parser_inst.get.assert_called_once_with('quorum.device.model')
+ mock_parser_inst.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ def test_add_node_nodelist(self):
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F2)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ c = p.count('nodelist.node')
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), c + 1)
+ self.assertEqual(get_free_nodeid(p), nid + 1)
+
+ def test_remove_node(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.remove_section_where('nodelist.node', 'nodeid', '2')
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 4)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1'])
+
+ def test_bnc862577(self):
+ p = Parser(F3)
+ _valid(p)
+ self.assertEqual(p.count('service.ver'), 1)
+
+ def test_get_free_nodeid(self):
+ def ids(*lst):
+ class Ids(object):
+ def get_all(self, _arg):
+ return lst
+ return Ids()
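+ # get_free_nodeid is expected to return the smallest positive nodeid not already in use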
+ self.assertEqual(1, corosync.get_free_nodeid(ids('2', '5')))
+ self.assertEqual(3, corosync.get_free_nodeid(ids('1', '2', '5')))
+ self.assertEqual(4, corosync.get_free_nodeid(ids('1', '2', '3')))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_crashtest_check.py b/test/unittests/test_crashtest_check.py
new file mode 100644
index 0000000..deb1ca5
--- /dev/null
+++ b/test/unittests/test_crashtest_check.py
@@ -0,0 +1,790 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+ import mock
+ from unittest import TestCase
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import check, config
+
+
+class TestCheck(TestCase):
+
+ @mock.patch('crmsh.crash_test.check.check_cluster')
+ def test_check(self, mock_cluster_check):
+ ctx = mock.Mock(cluster_check=True)
+ check.check(ctx)
+ mock_cluster_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_firewall')
+ @mock.patch('crmsh.crash_test.check.check_time_service')
+ @mock.patch('crmsh.crash_test.check.check_my_hostname_resolves')
+ def test_check_environment(self, mock_hostname, mock_time, mock_firewall):
+ check.check_environment()
+ mock_hostname.assert_called_once_with()
+ mock_time.assert_called_once_with()
+ mock_firewall.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.check.crmshboot.my_hostname_resolves')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_my_hostname_resolves(self, mock_task_check, mock_hostname, mock_this_node):
+ mock_task_inst = mock.Mock()
+ mock_task_check.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_hostname.return_value = False
+ mock_this_node.return_value = "node1"
+
+ check.check_my_hostname_resolves()
+
+ mock_task_check.assert_called_once_with("Checking hostname resolvable")
+ mock_hostname.assert_called_once_with()
+ mock_task_inst.error.assert_called_once_with('Hostname "node1" is unresolvable.\n Please add an entry to /etc/hosts or configure DNS.')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_none(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.side_effect = [False, False, False]
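+ # i.e. none of chronyd, ntp or ntpd is available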
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_has_calls([
+ mock.call('chronyd.service'),
+ mock.call('ntp.service'),
+ mock.call('ntpd.service')
+ ])
+ mock_task_inst.warn.assert_called_once_with("No NTP service found.")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_warn(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = False
+ mock_service_active.return_value = False
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_called_once_with("chronyd.service is available")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("chronyd.service is disabled"),
+ mock.call("chronyd.service is not active"),
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = True
+ mock_service_active.return_value = True
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("chronyd.service is available"),
+ mock.call("chronyd.service is enabled"),
+ mock.call("chronyd.service is active")
+ ])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_return(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ mock_run.return_value = (1, None, "error")
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("error")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_fail_to_get_port(self, mock_corosync_port):
+ mock_corosync_port.return_value = []
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("Can not get corosync's port")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ output_cmd = """
+ 1234/udp
+ 4444/tcp
+ """
+ mock_run.return_value = (0, output_cmd, None)
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("UDP port 5678 should open in firewalld")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+ task_inst.info.assert_called_once_with("UDP port 1234 is opened in firewalld")
+
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall_not_installed(self, mock_task, mock_installed):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.side_effect = [False, False]
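+ # i.e. neither firewalld nor SuSEfirewall2 is installed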
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_has_calls([
+ mock.call("firewalld"),
+ mock.call("SuSEfirewall2")
+ ])
+ mock_task_inst.warn.assert_called_once_with("Failed to detect firewall")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall_warn(self, mock_task, mock_installed, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = False
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_called_once_with("firewalld.service is available")
+ mock_task_inst.warn.assert_called_once_with("firewalld.service is not active")
+
+ @mock.patch('crmsh.crash_test.check.check_port_open')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall(self, mock_task, mock_installed, mock_active, mock_check_port):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = True
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("firewalld.service is available"),
+ mock.call("firewalld.service is active")
+ ])
+ mock_active.assert_called_once_with("firewalld")
+ mock_check_port.assert_called_once_with(mock_task_inst, "firewalld")
+
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster_return(self, mock_check_cluster):
+ mock_check_cluster.return_value = False
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_resources')
+ @mock.patch('crmsh.crash_test.check.check_nodes')
+ @mock.patch('crmsh.crash_test.check.check_fencing')
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster(self, mock_check_cluster, mock_check_fencing, mock_check_nodes, mock_check_resources):
+ mock_check_cluster.return_value = True
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+ mock_check_fencing.assert_called_once_with()
+ mock_check_nodes.assert_called_once_with()
+ mock_check_resources.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service_pacemaker_disable(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=False)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [False, True]
+ mock_active.side_effect = [True, False]
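+ # i.e. pacemaker is disabled and not running while corosync is enabled and running, so the check fails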
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, False)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("pacemaker.service is disabled"),
+ mock.call("corosync.service is enabled")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_called_once_with("corosync.service is running")
+ mock_task_inst.error.assert_called_once_with("pacemaker.service is not running!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=True)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [True, True]
+ mock_active.side_effect = [True, True]
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, True)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_has_calls([
+ mock.call("pacemaker.service is enabled"),
+ mock.call("corosync.service is running"),
+ mock.call("pacemaker.service is running")
+ ])
+ mock_task_inst.warn.assert_called_once_with("corosync.service is enabled")
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_stonith(self, mock_task, mock_fence_info):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=False)
+ mock_fence_info.return_value = mock_fence_info_inst
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_task_inst.warn.assert_called_once_with("stonith is disabled")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_resources(self, mock_task, mock_fence_info, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (1, None, None)
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_called_once_with("stonith is enabled")
+ mock_task_inst.warn.assert_called_once_with("No stonith resource configured!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_has_warn(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Stopped (disabled)", None)
+ mock_active.return_value = False
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("stonith resource stonith-sbd(external/sbd) is Stopped"),
+ mock.call("sbd service is not running!")
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Started node2", None)
+ mock_active.return_value = True
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is Started"),
+ mock.call("sbd service is running")
+ ])
+ mock_active.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_error(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_run.return_value = (1, None, "error data")
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.error.assert_called_once_with("run \"crm_mon -1\" error: error data")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: 15sp2-1 (version 2.0.3+20200511.2b248d828-1.10-2.0.3+20200511.2b248d828) - partition with quorum
+ * Last updated: Tue Nov 3 14:09:29 2020
+ * Last change: Tue Nov 3 13:47:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Online: [ 15sp2-1 ]
+ * OFFLINE: [ 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("DC node: 15sp2-1"),
+ mock.call("Cluster have quorum"),
+ mock.call("Online nodes: [ 15sp2-1 ]")
+ ])
+ mock_task_inst.warn.assert_called_once_with("OFFLINE nodes: [ 15sp2-2 ]")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_warn(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: NONE
+ * Last updated: Tue Nov 3 14:16:49 2020
+ * Last change: Tue Nov 3 14:09:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Node 15sp2-1: UNCLEAN (offline)
+ * Node 15sp2-2: UNCLEAN (offline)
+
+Active Resources:
+ * No active resources
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("Cluster lost quorum!"),
+ mock.call("Node 15sp2-1 is UNCLEAN!"),
+ mock.call("Node 15sp2-2 is UNCLEAN!")
+ ])
+
+ @mock.patch('crmsh.crash_test.check.completers.resources_stopped')
+ @mock.patch('crmsh.crash_test.check.completers.resources_started')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_resources(self, mock_task, mock_started, mock_stopped):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_started.return_value = ["r1", "r2"]
+ mock_stopped.return_value = ["r3", "r4"]
+
+ check.check_resources()
+
+ mock_task.assert_called_once_with("Checking resources")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("Started resources: r1,r2"),
+ mock.call("Stopped resources: r3,r4")
+ ])
+
+ # Test fix()
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+ def test_fix_has_candidate(cls, mock_check_sbd, mock_correct_sbd):
+ """
+ Test fix() when check_sbd() reports a valid candidate device
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = dev
+ check.fix(ctx)
+ mock_correct_sbd.assert_called_once_with(ctx, dev)
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+ def test_fix_no_candidate(cls, mock_check_sbd, mock_correct_sbd):
+ """
+ Test fix() when check_sbd() finds nothing to correct
+ """
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = ""
+ mock_correct_sbd.return_value = ""
+ check.fix(ctx)
+ mock_correct_sbd.assert_not_called()
+
+ # Test check_sbd()
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_no_conf(cls, mock_os_path_exists,
+ mock_utils_msg_info, mock_run):
+ """
+ Test no configuration file
+ """
+ mock_os_path_exists.return_value = False
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD configuration file {} not found.".
+ format(config.SBD_CONF), to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_not_configured(cls, mock_os_path_exists, mock_utils_parse_sysconf,
+ mock_utils_msg_info, mock_run):
+ """
+ Test SBD device not configured
+ """
+ mock_os_path_exists.return_value = True
+ mock_utils_parse_sysconf.return_value = {}
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD DEVICE not used.", to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_valid(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_info, mock_is_valid_sbd, mock_run):
+ """
+ Test configured SBD device exist and valid
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = True
+
+ check.check_sbd()
+ mock_msg_info.assert_called_with("'{}' is a valid SBD device.".format(dev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_but_no_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid and no candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = ""
+
+ check.check_sbd()
+ mock_msg_warn.assert_has_calls(
+ [mock.call("Device '{}' is not valid for SBD, may need initialize.".
+ format(dev), to_stdout=False),
+ mock.call("Fail to find a valid candidate SBD device.",
+ to_stdout=False)])
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_exist_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device does not exist but a candidate device is found
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, False]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "SBD device '{}' is not exist.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid but has candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "Device '{}' is not valid for SBD, may need initialize.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ # Test correct_sbd()
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_conf(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd raising an error when the configuration file does not exist
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [False, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Configure file {} not exist!'.
+ format(config.SBD_CONF))
+
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_dev(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd raising an error when the candidate device does not exist
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, False]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Device {} not exist!'.format(dev))
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.TaskFixSBD.verify')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('os.remove')
+ @mock.patch('shutil.move')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_move, mock_remove,
+ mock_mkstemp, mock_sbd_verify, mock_open):
+ """
+ Test correct_sbd replacing SBD_DEVICE with the candidate device successfully
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="SBD_DEVICE={}".format(dev)).return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
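+ # the first temporary file is the backup of the config, the second holds the edited copy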
+
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_called_once_with(config.SBD_CONF, bak)
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_move.assert_called_once_with(edit, config.SBD_CONF)
+ mock_remove.assert_called()
+ mock_sbd_verify.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_run_exception(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_mkstemp, mock_msg_error,
+ mock_open):
+ """
+ Test correct_sbd restoring the backup when editing the configuration file fails
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="data2").return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+ mock_copymode.side_effect = Exception('Copy file error!')
+
+ with cls.assertRaises(cls, crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_has_calls([mock.call(config.SBD_CONF, bak),
+ mock.call(bak, config.SBD_CONF)])
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_msg_error.assert_called_once_with('Fail to modify file {}'.
+ format(config.SBD_CONF))
diff --git a/test/unittests/test_crashtest_main.py b/test/unittests/test_crashtest_main.py
new file mode 100644
index 0000000..02ae7b3
--- /dev/null
+++ b/test/unittests/test_crashtest_main.py
@@ -0,0 +1,215 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+ import mock
+ from unittest import TestCase
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestContext(TestCase):
+
+ def test_context(self):
+ main.ctx.name = "xin"
+ self.assertEqual(main.ctx.name, "xin")
+
+
+class TestMain(TestCase):
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument_help(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.parse_argument(ctx)
+
+ mock_parser_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=False, env_check=True, sbd=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
+ main.parse_argument(ctx)
+ self.assertEqual(ctx.env_check, True)
+ self.assertEqual(ctx.sbd, True)
+
+ mock_parser_inst.print_help.assert_not_called()
+
+ def test_setup_basic_context(self):
+ ctx = mock.Mock(process_name="crash_test")
+ main.setup_basic_context(ctx)
+ self.assertEqual(ctx.var_dir, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.report_path, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.jsonfile, "/var/lib/crmsh/crash_test/crash_test.json")
+ self.assertEqual(ctx.logfile, "/var/log/crmsh/crmsh.log")
+
+ @mock.patch('logging.Logger.fatal')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_non_root(self, mock_setup, mock_parse, mock_is_root, mock_log_fatal):
+ mock_is_root.return_value = False
+ ctx = mock.Mock(process_name="crash_test")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_log_fatal.assert_called_once_with("{} can only be executed as user root!".format(ctx.process_name))
+
+ @mock.patch('crmsh.crash_test.main.split_brain')
+ @mock.patch('crmsh.crash_test.main.fence_node')
+ @mock.patch('crmsh.crash_test.main.kill_process')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.makedirs')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run(self, mock_setup, mock_parse, mock_is_root, mock_exists, mock_mkdir,
+ mock_fix, mock_check, mock_kill, mock_fence, mock_sb):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = False
+
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_mkdir.assert_called_once_with(ctx.var_dir, exist_ok=True)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_kill.assert_called_once_with(ctx)
+ mock_fence.assert_called_once_with(ctx)
+ mock_sb.assert_called_once_with(ctx)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_except(self, mock_setup, mock_parse, mock_is_root, mock_exists,
+ mock_fix, mock_check, mock_dumps):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = True
+ mock_check.side_effect = KeyboardInterrupt
+
+ with self.assertRaises(KeyboardInterrupt):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_dumps.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process_return_pacemaker_loop(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=True, loop=True, sbd=None, corosync=None)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process_return(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=False, sbd=False, corosync=False)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process(self, mock_task_kill):
+ mock_task_kill_inst = mock.Mock()
+ mock_task_kill.return_value = mock_task_kill_inst
+ mock_task_kill_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(sbd=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.kill_process(ctx)
+
+ mock_task_kill_inst.pre_check.assert_called_once_with()
+ mock_task_kill_inst.print_header.assert_called_once_with()
+ mock_task_kill_inst.enable_report.assert_called_once_with()
+ mock_task_kill_inst.run.assert_called_once_with()
+ mock_task_kill_inst.wait.assert_called_once_with()
+ mock_task_kill_inst.error.assert_called_once_with("error data")
+
+ def test_split_brain_return(self):
+ ctx = mock.Mock(sp_iptables=None)
+ main.split_brain(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True, force=False)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.do_block.return_value.__enter__ = mock.Mock()
+ mock_sp_inst.do_block.return_value.__exit__ = mock.Mock()
+
+ main.split_brain(ctx)
+
+ mock_sp.assert_called_once_with(False)
+ mock_sp_inst.pre_check.assert_called_once_with()
+ mock_sp_inst.print_header.assert_called_once_with()
+ mock_sp_inst.do_block.assert_called_once_with()
+ mock_sp_inst.run.assert_called_once_with()
+ mock_sp_inst.wait.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain_exception(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.pre_check.side_effect = task.TaskError("error data")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.split_brain(ctx)
+
+ mock_sp_inst.error.assert_called_once_with("error data")
+
+ def test_fence_node_return(self):
+ ctx = mock.Mock(fence_node=None)
+ main.fence_node(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskFence')
+ def test_fence_node(self, mock_task_fence):
+ mock_task_fence_inst = mock.Mock()
+ mock_task_fence.return_value = mock_task_fence_inst
+ mock_task_fence_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(fence_node=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.fence_node(ctx)
+
+ mock_task_fence_inst.pre_check.assert_called_once_with()
+ mock_task_fence_inst.print_header.assert_called_once_with()
+ mock_task_fence_inst.run.assert_called_once_with()
+ mock_task_fence_inst.wait.assert_called_once_with()
+ mock_task_fence_inst.error.assert_called_once_with("error data")
+
+ @classmethod
+ def test_MyArgParseFormatter(cls):
+ main.MyArgParseFormatter("test")
diff --git a/test/unittests/test_crashtest_task.py b/test/unittests/test_crashtest_task.py
new file mode 100644
index 0000000..3b4c092
--- /dev/null
+++ b/test/unittests/test_crashtest_task.py
@@ -0,0 +1,777 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+ import mock
+ from unittest import TestCase
+from datetime import datetime
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestTaskKill(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(current_case="sbd", loop=False)
+ self.task_kill_inst = task.TaskKill(ctx)
+ ctx2 = mock.Mock(current_case="sbd", loop=True)
+ self.task_kill_inst_loop = task.TaskKill(ctx2)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('os.path.isdir')
+ def test_enable_report_error(self, mock_isdir):
+ mock_isdir.return_value = False
+ main.ctx = mock.Mock(report_path="/path")
+ with self.assertRaises(task.TaskError) as error:
+ self.task_kill_inst.enable_report()
+ self.assertEqual("/path is not a directory", str(error.exception))
+ mock_isdir.assert_called_once_with("/path")
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report_looping(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst_loop.enable_report()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst.enable_report()
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Force kill sbd
+Looping Kill: False
+Expected State: a) sbd process restarted
+ b) Or, this node fenced.
+"""
+ res = self.task_kill_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_kill_inst.build_base_result = mock.Mock()
+ self.task_kill_inst.result = {}
+ self.task_kill_inst.prev_task_list = []
+ self.task_kill_inst.to_json()
+ self.task_kill_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_to_report_return(self):
+ self.task_kill_inst.report = False
+ self.task_kill_inst.to_report()
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('crmsh.crash_test.task.TaskKill.header')
+ def test_to_report(self, mock_header, mock_open_file, mock_fsync):
+ mock_header.return_value = "#### header"
+ self.task_kill_inst.report = True
+ self.task_kill_inst.messages = [["info", "data", "2021"]]
+ self.task_kill_inst.explain = "explain"
+ self.task_kill_inst.report_file = "report_file1"
+ file_handle = mock_open_file.return_value.__enter__.return_value
+
+ self.task_kill_inst.to_report()
+
+ file_handle.write.assert_has_calls([
+ mock.call("#### header"),
+ mock.call("\nLog:\n"),
+ mock.call("2021 INFO:data\n"),
+ mock.call("\nTestcase Explained:\n"),
+ mock.call("explain\n")
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check(self, mock_pre_check, mock_status):
+ mock_status.return_value = (False, 100)
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.pre_check()
+ self.assertEqual("Process sbd is not running!", str(err.exception))
+ mock_pre_check.assert_called_once_with()
+ mock_status.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.crash_test.task.TaskKill.process_monitor')
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_run(self, mock_status, mock_info, mock_run, mock_thread, mock_fence_monitor, mock_process_monitor):
+ mock_status.side_effect = [(False, -1), (True, 100)]
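+ # the status is polled until sbd is reported running (pid 100), after which killall is issued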
+ mock_thread_fence_inst = mock.Mock()
+ mock_thread_restart_inst = mock.Mock()
+ mock_thread.side_effect = [mock_thread_fence_inst, mock_thread_restart_inst]
+
+ self.task_kill_inst.run()
+
+ mock_status.assert_has_calls([mock.call("sbd"), mock.call("sbd")])
+ mock_info.assert_has_calls([
+ mock.call('Process sbd(100) is running...'),
+ mock.call('Trying to run "killall -9 sbd"')
+ ])
+ mock_run.assert_called_once_with("killall -9 sbd")
+ mock_thread.assert_has_calls([
+ mock.call(target=mock_fence_monitor),
+ mock.call(target=mock_process_monitor),
+ ])
+ mock_thread_fence_inst.start.assert_called_once_with()
+ mock_thread_restart_inst.start.assert_called_once_with()
+
+ def test_wait_exception(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = False
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.wait()
+ self.assertEqual("Process sbd is not restarted!", str(err.exception))
+
+ def test_wait(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = True
+
+ self.task_kill_inst.wait()
+
+ self.task_kill_inst.thread_stop_event.set.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_process_monitor(self, mock_status, mock_info, mock_sleep):
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event.is_set.side_effect = [False, False]
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ mock_status.side_effect = [(False, -1), (True, 100)]
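+ # the first poll still sees sbd down, the second sees it restarted with pid 100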
+
+ self.task_kill_inst.process_monitor()
+
+ self.task_kill_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_status.assert_has_calls([
+ mock.call("sbd"),
+ mock.call("sbd")
+ ])
+ mock_info.assert_called_once_with("Process sbd(100) is restarted!")
+ self.task_kill_inst.restart_happen_event.set.assert_called_once_with()
+ mock_sleep.assert_called_once_with(1)
+
+
+class TestTaskCheck(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_msg_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list=[{"process_name": "xin", "age": 38}])
+ self.task_check_inst = task.TaskCheck("task check job1", quiet=False)
+ self.task_check_inst_quiet = task.TaskCheck("task check job1", quiet=True)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.MyLoggingFormatter')
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_to_stdout(self, mock_manage_handler, mock_get_handler, mock_myformatter):
+ mock_manage_handler.return_value.__enter__ = mock.Mock()
+ mock_manage_handler.return_value.__exit__ = mock.Mock()
+
+ task.logger = mock.Mock()
+ task.logger.info = mock.Mock()
+ task.logger.log = mock.Mock()
+
+ get_handler_inst1 = mock.Mock()
+ get_handler_inst1.setFormatter = mock.Mock()
+ get_handler_inst2 = mock.Mock()
+ get_handler_inst2.setFormatter = mock.Mock()
+ mock_get_handler.side_effect = [get_handler_inst1, get_handler_inst2]
+
+ myformatter_inst1 = mock.Mock()
+ myformatter_inst2 = mock.Mock()
+ mock_myformatter.side_effect = [myformatter_inst1, myformatter_inst2]
+
+ self.task_check_inst.messages = [("info", "info message"), ("warn", "warn message")]
+ utils.CGREEN = ""
+ utils.CEND = ""
+ utils.CRED = ""
+
+ self.task_check_inst.to_stdout()
+
+ mock_manage_handler.assert_called_once_with("file", keep=False)
+ mock_get_handler.assert_has_calls([
+ mock.call(task.logger, "stream"),
+ mock.call(task.logger, "stream")
+ ])
+ get_handler_inst1.setFormatter.assert_called_once_with(myformatter_inst1)
+ get_handler_inst2.setFormatter.assert_called_once_with(myformatter_inst2)
+ mock_myformatter.assert_has_calls([
+ mock.call(flush=False),
+ mock.call()
+ ])
+ task.logger.info.assert_called_once_with('task check job1 [Pass]', extra={'timestamp': '[2019/07/10 01:15:15]'})
+ task.logger.log.assert_has_calls([
+ mock.call(20, 'info message', extra={'timestamp': ' '}),
+ mock.call(30, 'warn message', extra={'timestamp': ' '})
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_check_inst.build_base_result = mock.Mock()
+ self.task_check_inst.result = {}
+ self.task_check_inst.to_json()
+ self.task_check_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_print_result(self):
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.to_json = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_called_once_with()
+ self.task_check_inst.to_json.assert_called_once_with()
+
+ def test_print_result_quiet(self):
+ self.task_check_inst.quiet = True
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_not_called()
+
+ def test_run(self):
+ self.task_check_inst.print_result = mock.Mock()
+ with self.task_check_inst.run():
+ pass
+ self.task_check_inst.print_result.assert_called_once_with()
+
+
+class TestTaskSplitBrain(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ self.task_sp_inst = task.TaskSplitBrain()
+ self.task_sp_inst.fence_action = "reboot"
+ self.task_sp_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Simulate split brain by blocking traffic between cluster nodes
+Expected Result: One of nodes get fenced
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_sp_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_sp_inst.result = {}
+ self.task_sp_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_online_nodes):
+ mock_run.return_value = (0, None, None)
+ mock_online_nodes.return_value = ["node1"]
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("At least two nodes online!", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_online_nodes.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.peer_node_list')
+ def test_do_block_iptables(self, mock_peer_list, mock_info, mock_get_iplist, mock_run):
+ mock_peer_list.return_value = ["node1", "node2"]
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
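+        # do_block_iptables is expected to run a config.BLOCK_IP command with action 'I'
+        # (presumably an iptables insert) for every resolved peer IP; the matching 'D'
+        # (delete) calls are checked in test_un_block_iptables below.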
+ self.task_sp_inst.do_block_iptables()
+ mock_peer_list.assert_called_once_with()
+ mock_info.assert_has_calls([
+ mock.call("Trying to temporarily block node1 communication ip"),
+ mock.call("Trying to temporarily block node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain.un_block_iptables')
+ def test_un_block(self, mock_unblock_iptables):
+ self.task_sp_inst.un_block()
+ mock_unblock_iptables.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_un_block_iptables(self, mock_info, mock_get_iplist, mock_run):
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
+ self.task_sp_inst.peer_nodelist = ["node1", "node2"]
+ self.task_sp_inst.un_block_iptables()
+ mock_info.assert_has_calls([
+ mock.call("Trying to recover node1 communication ip"),
+ mock.call("Trying to recover node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ def test_run(self, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_sp_inst.run()
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ def test_wait(self):
+ self.task_sp_inst.fence_finish_event = mock.Mock()
+ self.task_sp_inst.fence_finish_event.wait.return_value = False
+ self.task_sp_inst.thread_stop_event = mock.Mock()
+ self.task_sp_inst.wait()
+ self.task_sp_inst.fence_finish_event.wait.assert_called_once_with(60)
+ self.task_sp_inst.thread_stop_event.set.assert_called_once_with()
+
+
+class TestFence(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(fence_node="node1", yes=False)
+ self.task_fence_inst = task.TaskFence(ctx)
+ self.task_fence_inst.fence_action = "reboot"
+ self.task_fence_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Fence node node1
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_fence_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_fence_inst.result = {}
+ self.task_fence_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which crm_node")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.check_node_status')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_node_status):
+ mock_run.side_effect = [(0, None, None), (0, None, None), (0, None, None)]
+ mock_node_status.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("Node \"node1\" not in cluster!", str(err.exception))
+ mock_run.assert_has_calls([
+ mock.call("which crm_node"),
+ mock.call("which stonith_admin"),
+ mock.call("which crm_attribute")
+ ])
+ mock_node_status.assert_called_once_with("node1", "member")
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_run(self, mock_info, mock_run, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_fence_inst.run()
+ mock_info.assert_called_once_with("Trying to fence node \"node1\"")
+ mock_run.assert_called_once_with("crm_attribute -t status -N 'node1' -n terminate -v true")
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait_this_node(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node1"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = True
+
+ self.task_fence_inst.wait()
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for self reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node2"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = None
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.wait()
+ self.assertEqual("Target fence node \"node1\" still alive", str(err.exception))
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for node \"node1\" reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+
+class TestTask(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list={"process_name": "xin", "age": 38})
+ self.task_inst = task.Task("task description", flush=True)
+ mock_now.assert_called_once_with()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ self.task_inst.header()
+
+ def test_to_report(self):
+ self.task_inst.to_report()
+
+ def test_to_json(self):
+ self.task_inst.to_json()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception(self, mock_active):
+ mock_active.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Cluster not running!", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception_no_fence(self, mock_active):
+ mock_active.return_value = True
+ self.task_inst.get_fence_info = mock.Mock()
+ self.task_inst.fence_enabled = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Require stonith enabled", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+ self.task_inst.get_fence_info.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ def test_get_fence_info(self, mock_fence_info):
+ mock_fence_info_inst = mock.Mock()
+ mock_fence_info.return_value = mock_fence_info_inst
+ self.task_inst.get_fence_info()
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_info(self, mock_info):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.info("info message")
+ self.task_inst.msg_append.assert_called_once_with("info", "info message")
+ mock_info.assert_called_once_with("info message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ def test_warn(self, mock_warn):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.warn("warn message")
+ self.task_inst.msg_append.assert_called_once_with("warn", "warn message")
+ mock_warn.assert_called_once_with("warn message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ def test_error(self, mock_error):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.error("error message")
+ self.task_inst.msg_append.assert_called_once_with("error", "error message")
+ mock_error.assert_called_once_with("error message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.now')
+ def test_msg_append(self, mock_now):
+ self.task_inst.to_json = mock.Mock()
+ self.task_inst.to_report = mock.Mock()
+ self.task_inst.msg_append("error", "warn message")
+ mock_now.assert_called_once_with()
+ self.task_inst.to_json.assert_called_once_with()
+ self.task_inst.to_report.assert_called_once_with()
+
+ def test_build_base_result(self):
+ self.task_inst.build_base_result()
+ expected_result = {
+ "Timestamp": self.task_inst.timestamp,
+ "Description": self.task_inst.description,
+ "Messages": []
+ }
+ self.assertDictEqual(expected_result, self.task_inst.result)
+
+ @mock.patch('crmsh.crash_test.utils.warning_ask')
+ def test_print_header(self, mock_ask):
+ self.task_inst.header = mock.Mock()
+ self.task_inst.info = mock.Mock()
+ mock_ask.return_value = False
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ self.task_inst.print_header()
+
+ self.task_inst.header.assert_called_once_with()
+ mock_ask.assert_called_once_with(task.Task.REBOOT_WARNING)
+ self.task_inst.info.assert_called_once_with("Testcase cancelled")
+
+ @mock.patch('crmsh.crash_test.utils.str_to_datetime')
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_fence_action_monitor(self, mock_run, mock_info, mock_sleep, mock_datetime):
+ self.task_inst.thread_stop_event = mock.Mock()
+ self.task_inst.thread_stop_event.is_set.side_effect = [False, False, False, False]
+ self.task_inst.fence_start_event = mock.Mock()
+ self.task_inst.fence_finish_event = mock.Mock()
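+        # Mocked monitor sequence: the first crm_mon poll sees no pending fencing action,
+        # the second reports a pending reboot of 15sp2-2, then the fence history is polled
+        # until the completed fencing of that node is found.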
+ output = "Pending Fencing Actions:\n * reboot of 15sp2-2 pending: client=pacemaker-controld.2430, origin=15sp2-1"
+ output2 = "Node 15sp2-2 last fenced at: Tue Jan 19 16:08:37 2021"
+ mock_run.side_effect = [(1, None, None), (0, output, None), (1, None, None), (0, output2, None)]
+ self.task_inst.timestamp = "2021/01/19 16:08:24"
+ mock_datetime.side_effect = [
+ datetime.strptime(self.task_inst.timestamp, '%Y/%m/%d %H:%M:%S'),
+ datetime.strptime("Tue Jan 19 16:08:37 2021", '%a %b %d %H:%M:%S %Y')
+ ]
+
+ self.task_inst.fence_action_monitor()
+
+ self.task_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call(),
+ mock.call(),
+ mock.call()
+ ])
+ mock_run.assert_has_calls([
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2")),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2"))
+ ])
+ mock_info.assert_has_calls([
+ mock.call("Node \"15sp2-2\" will be fenced by \"15sp2-1\"!"),
+ mock.call("Node \"15sp2-2\" was successfully fenced by \"15sp2-1\"")
+ ])
+ self.task_inst.fence_start_event.set.assert_called_once_with()
+ self.task_inst.fence_finish_event.set.assert_called_once_with()
+
+
+class TestFixSBD(TestCase):
+ """
+ Class to test TaskFixSBD of task.py
+ All tested in test_crash_test.py except verify()
+ """
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info, mock_mkstemp, mock_isfile, mock_open):
+ """
+ Test setUp.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+
+ self.task_fixsbd = task.TaskFixSBD(dev, force=False)
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+ pass
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_verify_succeed(self, mock_msg_info, mock_isfile, mock_open, mock_fsync):
+ """
+ Test verify successful.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ mock_msg_info.assert_called_once_with('SBD DEVICE change succeed',
+ to_stdout=True)
+ mock_fsync.assert_called()
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ def test_verify_fail(self, mock_isfile, mock_open):
+ """
+ Test verify failed.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ dev_cur = "/dev/disk/by-id/scsi-SATA_ST2000LM007-no_change"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev_cur)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ self.assertEqual("Fail to replace SBD device {} in {}!".
+ format(dev, config.SBD_CONF), str(err.exception))
diff --git a/test/unittests/test_crashtest_utils.py b/test/unittests/test_crashtest_utils.py
new file mode 100644
index 0000000..f8a579b
--- /dev/null
+++ b/test/unittests/test_crashtest_utils.py
@@ -0,0 +1,540 @@
+import os
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
+
+try:
+    from unittest import mock, TestCase
+except ImportError:
+    import mock
+    from unittest import TestCase
+import logging
+
+from crmsh.crash_test import utils, main, config
+
+
+class TestMyLoggingFormatter(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.fence_info_inst = utils.FenceInfo()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+
+class TestFenceInfo(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.fence_info_inst = utils.FenceInfo()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_false(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, False)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_true(self, mock_get_property):
+ mock_get_property.return_value = "True"
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, True)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action_none(self, mock_get_property, mock_error):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, None)
+ mock_get_property.assert_called_once_with("stonith-action")
+ mock_error.assert_called_once_with('Cluster property "stonith-action" should be reboot|off|poweroff')
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action(self, mock_get_property):
+ mock_get_property.return_value = "reboot"
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, "reboot")
+ mock_get_property.assert_called_once_with("stonith-action")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout(self, mock_get_property):
+ mock_get_property.return_value = "60s"
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, "60")
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout_default(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, config.FENCE_TIMEOUT)
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+
+class TestUtils(TestCase):
+ '''
+ Unitary tests for crash_test/utils.py
+ '''
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_now(self, mock_datetime):
+ mock_now = mock.Mock()
+ mock_datetime.now.return_value = mock_now
+ mock_now.strftime.return_value = "2019/07/05 14:44:55"
+
+ result = utils.now()
+
+ self.assertEqual(result, "2019/07/05 14:44:55")
+ mock_datetime.now.assert_called_once_with()
+ mock_now.strftime.assert_called_once_with("%Y/%m/%d %H:%M:%S")
+
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ def test_manage_handler(self, mock_get_handler):
+ mock_get_handler.return_value = "handler"
+ utils.logger = mock.Mock()
+ utils.logger.removeHandler = mock.Mock()
+ utils.logger.addHandler = mock.Mock()
+
+ with utils.manage_handler("type1", keep=False):
+ pass
+
+ mock_get_handler.assert_called_once_with(utils.logger, "type1")
+ utils.logger.removeHandler.assert_called_once_with("handler")
+ utils.logger.addHandler.assert_called_once_with("handler")
+
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_msg_raw(self, mock_handler):
+ utils.logger = mock.Mock()
+ utils.logger.log = mock.Mock()
+ utils.msg_raw("level1", "msg1")
+ mock_handler.assert_called_once_with("console", True)
+ utils.logger.log.assert_called_once_with("level1", "msg1")
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_info(self, mock_raw):
+ utils.msg_info("msg1")
+ mock_raw.assert_called_once_with(logging.INFO, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_warn(self, mock_raw):
+ utils.msg_warn("msg1")
+ mock_raw.assert_called_once_with(logging.WARNING, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_error(self, mock_raw):
+ utils.msg_error("msg1")
+ mock_raw.assert_called_once_with(logging.ERROR, "msg1", True)
+
+ @mock.patch('os.fsync')
+ @mock.patch('json.dumps')
+ @mock.patch('builtins.open', create=True)
+ def test_json_dumps(self, mock_open_file, mock_dumps, mock_fsync):
+ main.ctx = mock.Mock(jsonfile="file1", task_list={"process_name": "xin", "age": 38})
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_dumps.return_value = "data"
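+        # json_dumps should dump main.ctx.task_list into ctx.jsonfile and write/flush/fsync it,
+        # presumably so the results survive the node crash or reboot these tests can trigger.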
+
+ utils.json_dumps()
+
+ mock_open_file.assert_called_once_with("file1", "w")
+ mock_dumps.assert_called_once_with(main.ctx.task_list, indent=2)
+ file_handle.write.assert_called_once_with("data")
+ file_handle.flush.assert_called_once_with()
+ mock_fsync.assert_called_once_with(file_handle)
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.this_node')
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node_false(self, mock_run, mock_error, mock_this_node):
+ mock_run.return_value = (1, None, "error data")
+ mock_this_node.return_value = "node1"
+
+ res = utils.this_node()
+ self.assertEqual(res, "node1")
+
+ mock_run.assert_called_once_with("crm_node --name")
+ mock_error.assert_called_once_with("error data")
+ mock_this_node.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.this_node()
+ self.assertEqual(res, "data")
+ mock_run.assert_called_once_with("crm_node --name")
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_str_to_datetime(self, mock_datetime):
+ utils.str_to_datetime("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+ mock_datetime.strptime.assert_called_once_with("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_corosync_port_list(self, mock_run):
+ output = """
+totem.interface.0.bindnetaddr (str) = 10.10.10.121
+totem.interface.0.mcastaddr (str) = 239.101.40.63
+totem.interface.0.mcastport (u16) = 5405
+totem.interface.0.ttl (u8) = 1
+totem.interface.1.bindnetaddr (str) = 20.20.20.121
+totem.interface.1.mcastaddr (str) = 239.6.213.31
+totem.interface.1.mcastport (u16) = 5407
+totem.interface.1.ttl (u8) = 1
+ """
+ mock_run.return_value = (0, output, None)
+ result = utils.corosync_port_list()
+ expected = ['5405', '5407']
+ self.assertListEqual(result, expected)
+ mock_run.assert_called_once_with("corosync-cmapctl totem.interface")
+
+ def test_get_handler(self):
+ mock_handler1 = mock.Mock(_name="test1_handler")
+ mock_handler2 = mock.Mock(_name="test2_handler")
+ mock_logger = mock.Mock(handlers=[mock_handler1, mock_handler2])
+ res = utils.get_handler(mock_logger, "test1_handler")
+ self.assertEqual(res, mock_handler1)
+
+ @mock.patch('os.getuid')
+ def test_is_root(self, mock_getuid):
+ mock_getuid.return_value = 0
+ self.assertEqual(utils.is_root(), True)
+ mock_getuid.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status_false(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
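+        # get_process_status scans /proc/<pid>/cmdline entries (NUL-separated argv) and
+        # compares the basename of the first argument with the requested process name.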
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/cmd2\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/cmd2\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "cmd2"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, False)
+ self.assertEqual(pid, -1)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/cmd2\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/sbd\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/sbd\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "sbd"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, True)
+ self.assertEqual(pid, 2)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/sbd\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status_error_cmd(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ res = utils.check_node_status("node1", "member")
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status(self, mock_run, mock_error):
+ output = """
+1084783297 15sp2-1 member
+1084783193 15sp2-2 lost
+ """
+ mock_run.return_value = (0, output, None)
+
+ res = utils.check_node_status("15sp2-2", "member")
+ self.assertEqual(res, False)
+ res = utils.check_node_status("15sp2-1", "member")
+ self.assertEqual(res, True)
+
+ mock_run.assert_has_calls([
+ mock.call("crm_node -l"),
+ mock.call("crm_node -l")
+ ])
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes_empty(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.online_nodes()
+ self.assertEqual(res, [])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes(self, mock_run):
+ output = """
+Node List:
+ * Online: [ 15sp2-1 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+ res = utils.online_nodes()
+ self.assertEqual(res, ["15sp2-1", "15sp2-2"])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list_empty(self, mock_online):
+ mock_online.return_value = None
+ res = utils.peer_node_list()
+ self.assertEqual(res, [])
+ mock_online.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list(self, mock_online, mock_this_node):
+ mock_online.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.peer_node_list()
+ self.assertEqual(res, ["node2"])
+ mock_online.assert_called_once_with()
+
+ # Test is_valid_sbd():
+ @classmethod
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_exist(cls, mock_os_path_exists):
+ """
+ Test device not exist
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = False
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_cmd_error(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test device is not valid sbd
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (-1, None, "Unknown error!")
+ mock_msg_err.return_value = ""
+
+ res = utils.is_valid_sbd(dev)
+ mock_msg_err.assert_called_once_with("Unknown error!")
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test device is not SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ err_output = """
+==Dumping header on disk {}
+==Header on disk {} NOT dumped
+sbd failed; please check the logs.
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (1, "==Dumping header on disk {}".format(dev),
+ err_output)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+ mock_msg_err.assert_called_once_with(err_output)
+
+ @classmethod
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_is_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header):
+ """
+        Test device is a valid SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ std_output = """
+==Dumping header on disk {}
+Header version : 2.1
+UUID : f4c99362-6522-46fc-8ce4-7db60aff19bb
+Number of slots : 255
+Sector size : 512
+Timeout (watchdog) : 5
+Timeout (allocate) : 2
+Timeout (loop) : 1
+Timeout (msgwait) : 10
+==Header on disk {} is dumped
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (0, std_output, None)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is True
+
+ # Test find_candidate_sbd() and _find_match_count()
+ @classmethod
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_dev(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob):
+ """
+ Test no suitable device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = []
+
+ res = utils.find_candidate_sbd("/not-exist-folder/not-exist-dev")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_can(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test no valid candidate device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_DE_devC",
+ "/dev/disk/by-id/scsi-label_DE_devD"]
+ mock_is_valid_sbd.side_effect = [False, False, False, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_has_multi(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test has multiple valid candidate devices
+ """
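+        # find_candidate_sbd presumably relies on _find_match_count to pick the valid device
+        # whose name matches the original most closely; scsi-label_CN_devD shares the
+        # "label_CN" naming and wins over the other valid candidates.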
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_CN_devC",
+ "/dev/disk/by-id/scsi-label_CN_devD",
+ "/dev/disk/by-id/scsi-mp_China_devE",
+ "/dev/disk/by-id/scsi-mp_China_devF"]
+ mock_is_valid_sbd.side_effect = [True, False, False, True, True, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == "/dev/disk/by-id/scsi-label_CN_devD"
diff --git a/test/unittests/test_gv.py b/test/unittests/test_gv.py
new file mode 100644
index 0000000..fda7272
--- /dev/null
+++ b/test/unittests/test_gv.py
@@ -0,0 +1,36 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+import re
+
+from crmsh import crm_gv
+from crmsh import cibconfig
+
+
+def test_digits_ident():
+ g = crm_gv.gv_types["dot"]()
+ cibconfig.set_graph_attrs(g, ".")
+
+ g.new_node("1a", top_node=True)
+ g.new_attr("1a", 'label', "1a")
+ g.new_node("a", top_node=True)
+ g.new_attr("a", 'label', "a")
+
+ expected = [
+ 'fontname="Helvetica";',
+ 'fontsize="11";',
+ 'compound="true";',
+ '"1a" [label="1a"];',
+ 'a [label="a"];',
+ ]
+ out = '\n'.join(g.repr()).replace('\t', '')
+
+ for line in re.match(
+ r'^digraph G {\n\n(?P<expected>.*)\n}$', out, re.M | re.S
+ ).group('expected').split('\n'):
+ assert line in expected
+ expected.remove(line)
+
+ assert len(expected) == 0
diff --git a/test/unittests/test_handles.py b/test/unittests/test_handles.py
new file mode 100644
index 0000000..54cd634
--- /dev/null
+++ b/test/unittests/test_handles.py
@@ -0,0 +1,166 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import handles
+
+
+def test_basic():
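+    # Template syntax exercised below: {{key}} substitutes a value, {{key:sub}} looks up a
+    # nested dict entry, and unknown keys render as the empty string.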
+ t = """{{foo}}"""
+ assert "hello" == handles.parse(t, {'foo': 'hello'})
+ t = """{{foo:bar}}"""
+ assert "hello" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{wiz}}"""
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{foo}}.{{wiz}}"""
+ assert "a.b" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+ t = """Here's a line of text
+ followed by another line
+ followed by some {{foo}}.{{wiz}}
+ and then some at the end"""
+ assert """Here's a line of text
+ followed by another line
+ followed by some a.b
+ and then some at the end""" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+
+
+def test_weird_chars():
+ t = "{{foo#_bar}}"
+ assert "hello" == handles.parse(t, {'foo#_bar': 'hello'})
+ t = "{{_foo$bar_}}"
+ assert "hello" == handles.parse(t, {'_foo$bar_': 'hello'})
+
+
+def test_conditional():
+ t = """{{#foo}}before{{foo:bar}}after{{/foo}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ assert "" == handles.parse(t, {'faa': {'bar': 'hello'}})
+
+ t = """{{#cond}}before{{foo:bar}}after{{/cond}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': True})
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': False})
+
+
+def test_iteration():
+ t = """{{#foo}}!{{foo:bar}}!{{/foo}}"""
+ assert "!hello!!there!" == handles.parse(t, {'foo': [{'bar': 'hello'}, {'bar': 'there'}]})
+
+
+def test_result():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_result2():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+{{#obj}}
+{{obj}}
+{{/obj}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+primitive d0 Dummy
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_mustache():
+ t = """Hello {{name}}
+You have just won {{value}} dollars!
+{{#in_ca}}
+Well, {{taxed_value}} dollars, after taxes.
+{{/in_ca}}
+"""
+ v = {
+ "name": "Chris",
+ "value": 10000,
+ "taxed_value": 10000 - (10000 * 0.4),
+ "in_ca": True
+ }
+
+ assert """Hello Chris
+You have just won 10000 dollars!
+Well, 6000.0 dollars, after taxes.
+""" == handles.parse(t, v)
+
+
+def test_invert():
+ t = """{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """
+No repos :(
+""" == handles.parse(t, v)
+
+
+def test_invert_2():
+ t = """foo
+{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+bar
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """foo
+No repos :(
+bar
+""" == handles.parse(t, v)
+
+
+def test_cib():
+ t = """{{filesystem}}
+{{exportfs}}
+{{rootfs}}
+{{virtual-ip}}
+clone c-{{rootfs:id}} {{rootfs:id}}
+group g-nfs
+ {{exportfs:id}}
+ {{virtual-ip:id}}
+order base-then-nfs inf: {{filesystem:id}} g-nfs
+colocation nfs-with-base inf: g-nfs {{filesystem:id}}
+order rootfs-before-nfs inf: c-{{rootfs:id}} g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-{{rootfs:id}}
+"""
+ r = """primitive fs1 Filesystem
+primitive efs exportfs
+primitive rfs rootfs
+primitive vip IPaddr2
+ params ip=192.168.0.2
+clone c-rfs rfs
+group g-nfs
+ efs
+ vip
+order base-then-nfs inf: fs1 g-nfs
+colocation nfs-with-base inf: g-nfs fs1
+order rootfs-before-nfs inf: c-rfs g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-rfs
+"""
+ v = {
+ 'filesystem': handles.value({'id': 'fs1'}, 'primitive fs1 Filesystem'),
+ 'exportfs': handles.value({'id': 'efs'}, 'primitive efs exportfs'),
+ 'rootfs': handles.value({'id': 'rfs'}, 'primitive rfs rootfs'),
+ 'virtual-ip': handles.value({'id': 'vip'},
+ 'primitive vip IPaddr2\n params ip=192.168.0.2'),
+ }
+ assert r == handles.parse(t, v)
diff --git a/test/unittests/test_lock.py b/test/unittests/test_lock.py
new file mode 100644
index 0000000..a8dc982
--- /dev/null
+++ b/test/unittests/test_lock.py
@@ -0,0 +1,271 @@
+"""
+Unitary tests for crmsh/lock.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2020-12-18
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import lock, config
+
+
+class TestLock(unittest.TestCase):
+ """
+ Unitary tests for crmsh.lock.Lock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.local_inst = lock.Lock()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, "output data", None)
+ rc, out, err = self.local_inst._run("test_cmd")
+ mock_run.assert_called_once_with("test_cmd")
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir_false(self, mock_run):
+ mock_run.return_value = (1, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, False)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, True)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ def test_lock_or_fail(self, mock_create):
+ mock_create.return_value = False
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.local_inst._lock_or_fail()
+ self.assertEqual("Failed to claim lock (the lock directory exists at {})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+ mock_create.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_unlock(self, mock_run):
+ self.local_inst.lock_owner = True
+ self.local_inst._unlock()
+ mock_run.assert_called_once_with("rm -rf {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock_exception(self, mock_lock, mock_unlock):
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.local_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.local_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+
+class TestRemoteLock(unittest.TestCase):
+ """
+ Unitary tests for crmsh.lock.RemoteLock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.lock_inst = lock.RemoteLock("node1")
+ self.lock_inst_no_wait = lock.RemoteLock("node1", wait=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run_ssh_error(self, mock_run):
+ mock_run.return_value = (255, None, "ssh error")
+ with self.assertRaises(lock.SSHError) as err:
+ self.lock_inst._run("cmd")
+ self.assertEqual("ssh error", str(err.exception))
+ mock_run.assert_called_once_with("node1", "cmd")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.lock_inst._run("cmd")
+ self.assertEqual(res, mock_run.return_value)
+ mock_run.assert_called_once_with("node1", "cmd")
+
+ def test_lock_timeout_error_format(self):
+ config.core.lock_timeout = "pwd"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Invalid format of core.lock_timeout(should be a number)", str(err.exception))
+
+ def test_lock_timeout_min_error(self):
+ config.core.lock_timeout = "12"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Minimum value of core.lock_timeout should be 120", str(err.exception))
+
+ def test_lock_timeout(self):
+ config.core.lock_timeout = "130"
+ self.assertEqual(self.lock_inst.lock_timeout, 130)
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist_error(self, mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst._get_online_nodelist()
+ self.assertEqual("error data", str(err.exception))
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist(self, mock_run):
+ output = """
+ 1084783297 15sp2-1 member
+ 1084783193 15sp2-2 lost
+ 1084783331 15sp2-3 member
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.lock_inst._get_online_nodelist()
+ self.assertEqual(res, ["15sp2-1", "15sp2-3"])
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_break(self, mock_time, mock_time_out, mock_create):
+ mock_time.return_value = 10000
+ mock_time_out.return_value = 120
+ mock_create.return_value = True
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_called_once_with()
+ mock_time_out.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_timed_out(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
+ mock_time.side_effect = [10000, 10121]
+ mock_time_out.return_value = 120
+ mock_create.return_value = False
+ mock_get_nodelist.return_value = ["node2"]
+
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.lock_inst._lock_or_wait()
+ self.assertEqual("Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (node1:{})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+
+        mock_time.assert_has_calls([mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_called_once_with()
+ mock_get_nodelist.assert_called_once_with()
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_called_once_with(10)
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_again(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
+ mock_time.side_effect = [10000, 10010, 10020]
+ mock_time_out.side_effect = [120, 120, 120]
+ mock_create.side_effect = [False, False, True]
+ mock_get_nodelist.side_effect = [["node1"], ["node1", "node2"]]
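+        # Scenario: the lock directory cannot be created on the first two attempts (another
+        # node still holds it), so the caller warns once and sleeps between retries, then
+        # succeeds on the third attempt.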
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_get_nodelist.assert_has_calls([mock.call(), mock.call()])
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock_exception(self, mock_lock, mock_unlock):
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.lock_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.lock_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_fail')
+ def test_lock_no_wait(self, mock_lock, mock_unlock):
+ with self.lock_inst_no_wait.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
diff --git a/test/unittests/test_objset.py b/test/unittests/test_objset.py
new file mode 100644
index 0000000..cae39ca
--- /dev/null
+++ b/test/unittests/test_objset.py
@@ -0,0 +1,40 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import cibconfig
+
+factory = cibconfig.cib_factory
+
+
+def assert_in(needle, haystack):
+ if needle not in haystack:
+ message = "%s not in %s" % (needle, haystack)
+ raise AssertionError(message)
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
+def test_nodes_nocli():
+ for n in factory.node_id_list():
+ obj = factory.find_object(n)
+ if obj is not None:
+ assert obj.node is not None
+ assert True == obj.cli_use_validate()
+ assert False == obj.nocli
+
+
+def test_show():
+ setobj = cibconfig.mkset_obj()
+ s = setobj.repr_nopretty()
+ sp = s.splitlines()
+ assert_in("node ha-one", sp[0:3])
diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py
new file mode 100644
index 0000000..603c68d
--- /dev/null
+++ b/test/unittests/test_ocfs2.py
@@ -0,0 +1,465 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import ocfs2, utils, ra, constants
+
+logging.basicConfig(level=logging.INFO)
+
+class TestOCFS2Manager(unittest.TestCase):
+ """
+    Unitary tests for crmsh.ocfs2.OCFS2Manager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ context1 = mock.Mock(ocfs2_devices=[])
+ self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1)
+
+ context2 = mock.Mock(ocfs2_devices=[],
+ stage="ocfs2",
+ yes_to_all=True)
+ self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2)
+
+ context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"],
+ use_cluster_lvm2=False)
+ self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3)
+
+ context4 = mock.Mock(ocfs2_devices=[],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4)
+
+ context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"])
+ self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5)
+
+ context6 = mock.Mock(ocfs2_devices=["/dev/sda2"],
+ mount_point="/data")
+ self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6)
+
+ context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_verify_packages(self, mock_installed):
+ mock_installed.side_effect = [True, False]
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True)
+ self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception))
+ mock_installed.assert_has_calls([
+ mock.call("ocfs2-tools"),
+ mock.call("lvm2-lockd")
+ ])
+
+ def test_verify_options_stage_miss_option(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._verify_options()
+ self.assertEqual("ocfs2 stage require -o option", str(err.exception))
+
+ def test_verify_options_two_devices(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_options()
+ self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception))
+
+ def test_verify_options_only_C(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst4._verify_options()
+ self.assertEqual("-C option only valid together with -o option", str(err.exception))
+
+ @mock.patch('crmsh.utils.has_mount_point_used')
+ def test_verify_options_mount(self, mock_mount):
+ mock_mount.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst6._verify_options()
+ self.assertEqual("Mount point /data already mounted", str(err.exception))
+ mock_mount.assert_called_once_with("/data")
+
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_not_block(self, mock_is_block):
+ mock_is_block.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_devices()
+ self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_lvm(self, mock_is_block, mock_lvm):
+ mock_lvm.return_value = True
+ mock_is_block.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.has_disk_mounted')
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted):
+ mock_lvm.return_value = False
+ mock_is_block.return_value = True
+ mock_mounted.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 already mounted", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+ mock_mounted.assert_called_once_with("/dev/sdb2")
+
+ def test_check_if_already_configured_return(self):
+ self.ocfs2_inst3._check_if_already_configured()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_if_already_configured(self, mock_run, mock_info):
+ mock_run.return_value = "data xxx fstype=ocfs2 sss"
+ with self.assertRaises(utils.TerminateSubCommand):
+ self.ocfs2_inst2._check_if_already_configured()
+ mock_run.assert_called_once_with("crm configure show")
+ mock_info.assert_called_once_with("Already configured OCFS2 related resources")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_devices')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_if_already_configured')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_options')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ def test_static_verify(self, mock_verify_packages, mock_verify_options, mock_configured, mock_verify_devices):
+ self.ocfs2_inst3._static_verify()
+ mock_verify_packages.assert_called_once_with(False)
+ mock_verify_options.assert_called_once_with()
+ mock_configured.assert_called_once_with()
+ mock_verify_devices.assert_called_once_with()
+
+ def test_dynamic_raise_error(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._dynamic_raise_error("error messages")
+ self.assertEqual("error messages", str(err.exception))
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ def test_check_sbd_and_ocfs2_dev(self, mock_enabled, mock_get_device, mock_error):
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb2"]
+ self.ocfs2_inst3._check_sbd_and_ocfs2_dev()
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_error.assert_called_once_with("/dev/sdb2 cannot be the same with SBD device")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev(self, mock_has_parted, mock_fstype, mock_confirm):
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, False]
+ with self.assertRaises(utils.TerminateSubCommand) as err:
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev_confirmed(self, mock_has_parted, mock_fstype, mock_confirm, mock_run):
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, True]
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+ mock_run.assert_has_calls([
+ mock.call("wipefs -a /dev/sdb2"),
+ mock.call("wipefs -a /dev/sdc2")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify_error(self, mock_has_stonith, mock_error):
+ mock_has_stonith.return_value = False
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_error.assert_called_once_with("OCFS2 requires stonith device configured and running")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._confirm_to_overwrite_ocfs2_dev')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_sbd_and_ocfs2_dev')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify(self, mock_has_stonith, mock_check_dev, mock_confirm):
+ mock_has_stonith.return_value = True
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_check_dev.assert_called_once_with()
+ mock_confirm.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.gen_unused_id')
+ def test_gen_ra_scripts(self, mock_gen_unused):
+ self.ocfs2_inst3.exist_ra_id_list = []
+ mock_gen_unused.return_value = "g1"
+ res = self.ocfs2_inst3._gen_ra_scripts("GROUP", {"id": "g1", "ra_string": "d vip"})
+ assert res == ("g1", "\ngroup g1 d vip")
+ mock_gen_unused.assert_called_once_with([], "g1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_mkfs(self, mock_long, mock_get_value, mock_run):
+ mock_get_value.return_value = "hacluster"
+ self.ocfs2_inst3._mkfs("/dev/sdb2")
+ mock_long.assert_called_once_with(" Creating OCFS2 filesystem for /dev/sdb2")
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+ mock_run.assert_called_once_with("mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sdb2")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_vg_change(self, mock_run):
+ self.ocfs2_inst3.vg_id = "vg1"
+ with self.ocfs2_inst3._vg_change():
+ pass
+ mock_run.assert_has_calls([
+ mock.call("vgchange -ay vg1"),
+ mock.call("vgchange -an vg1")
+ ])
+
+ @mock.patch('crmsh.utils.get_pe_number')
+ @mock.patch('crmsh.utils.gen_unused_id')
+ @mock.patch('crmsh.utils.get_all_vg_name')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_create_lv(self, mock_long, mock_run, mock_all_vg, mock_unused, mock_pe_num):
+ mock_all_vg.return_value = []
+ mock_unused.return_value = "vg1"
+ mock_pe_num.return_value = 1234
+ res = self.ocfs2_inst3._create_lv()
+ self.assertEqual(res, "/dev/vg1/ocfs2-lv")
+ mock_run.assert_has_calls([
+ mock.call("pvcreate /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("vgcreate --shared vg1 /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("lvcreate -l 1234 vg1 -n ocfs2-lv -y")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_group_and_clone_scripts(self, mock_gen):
+ mock_gen.side_effect = [("id1", "group_script\n"), ("id2", "clone_script\n")]
+ res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"])
+ self.assertEqual(res, "group_script\nclone_script\n")
+ mock_gen.assert_has_calls([
+ mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}),
+ mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'})
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_fs_scripts(self, mock_gen):
+ mock_gen.return_value = "scripts"
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ res = self.ocfs2_inst3._gen_fs_scripts()
+ self.assertEqual(res, "scripts")
+ mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'})
+
+ @mock.patch('crmsh.bootstrap.wait_for_resource')
+ @mock.patch('crmsh.utils.append_res_to_group')
+ @mock.patch('crmsh.bootstrap.crm_configure_load')
+ def test_load_append_and_wait(self, mock_load, mock_append, mock_wait):
+ self.ocfs2_inst3.group_id = "g1"
+ self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data")
+ mock_load.assert_called_once_with("update", "scripts")
+ mock_append.assert_called_once_with("g1", "res_id")
+ mock_wait.assert_called_once_with("messages data", "res_id")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait):
+ mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n")
+ mock_gen_group.return_value = "group_scripts\n"
+ self.ocfs2_inst3._config_dlm()
+ mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"})
+ mock_gen_group.assert_called_once_with(["dlm_id"])
+ mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False)
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3._config_lvmlockd()
+ mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.vg_id = "vg1"
+ self.ocfs2_inst3._config_lvmactivate()
+ mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts')
+ @mock.patch('crmsh.utils.mkdirp')
+ def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait):
+ mock_gen_fs.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3._config_fs()
+ mock_mkdir.assert_called_once_with("/data")
+ mock_gen_fs.assert_called_once_with()
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start")
+
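+ # _config_resource_stack_lvm2 is expected to set up DLM, lvmlockd, a shared
+ # VG/LV, run mkfs.ocfs2 on the new LV, then configure LVM-activate and the
+ # Filesystem resource.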
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmactivate')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._vg_change')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._create_lv')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmlockd')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_lvm2(self, mock_dlm, mock_lvmlockd, mock_lv, mock_vg, mock_mkfs, mock_lvmactivate, mock_fs):
+ mock_lv.return_value = "/dev/sda1"
+ self.ocfs2_inst3._config_resource_stack_lvm2()
+ mock_dlm.assert_called_once_with()
+ mock_lvmlockd.assert_called_once_with()
+ mock_lv.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sda1")
+ mock_lvmactivate.assert_called_once_with()
+ mock_fs.assert_called_once_with()
+
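+ # The plain-device variant only needs DLM, mkfs.ocfs2 on the device and the
+ # Filesystem resource.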
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_ocfs2_along(self, mock_dlm, mock_mkfs, mock_fs):
+ self.ocfs2_inst3._config_resource_stack_ocfs2_along()
+ mock_dlm.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sdb2")
+ mock_fs.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst7.mount_point = "/data"
+ self.ocfs2_inst7.target_device = "/dev/vg1/lv1"
+ self.ocfs2_inst7.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_lvm2.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ self.ocfs2_inst3.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/sda1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_ocfs2.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_none(self, mock_run):
+ mock_run.return_value = "data"
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ assert res is None
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_exception(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2
+ """
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual("Filesystem require configure device", str(err.exception))
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2 device="/dev/sda2"
+ """
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual(res, "/dev/sda2")
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2_return(self, mock_find):
+ mock_find.return_value = None
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.compare_uuid_with_peer_dev')
+ @mock.patch('crmsh.utils.is_dev_a_plain_raw_disk_or_partition')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2(self, mock_find, mock_long, mock_parser, mock_verify_packages, mock_is_mapper, mock_compare):
+ mock_find.return_value = "/dev/sda2"
+ mock_parser("node1").is_resource_configured.return_value = False
+ mock_is_mapper.return_value = True
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+ mock_verify_packages.assert_called_once_with(False)
+ mock_is_mapper.assert_called_once_with("/dev/sda2", "node1")
+ mock_compare.assert_called_once_with(["/dev/sda2"], "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._static_verify')
+ def test_verify_ocfs2(self, mock_static_verify):
+ context1 = mock.Mock(ocfs2_devices=[])
+ ocfs2.OCFS2Manager.verify_ocfs2(context1)
+ mock_static_verify.assert_called_once_with()
diff --git a/test/unittests/test_parallax.py b/test/unittests/test_parallax.py
new file mode 100644
index 0000000..b934d91
--- /dev/null
+++ b/test/unittests/test_parallax.py
@@ -0,0 +1,104 @@
+from __future__ import unicode_literals
+# Copyright (C) 2019 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parallax.py
+
+
+import unittest
+from unittest import mock
+
+import crmsh.parallax
+import crmsh.prun.prun
+
+
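+# These tests cover the thin parallax_* wrappers around crmsh.prun:
+# successful results are translated back into the parallax-style return
+# values, parallax_call rejects any non-zero exit code, and SSH/PRun
+# errors are surfaced as ValueError.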
+class TestParallax(unittest.TestCase):
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # Use the setup to create a fresh instance for each test
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_call(["node1"], "ls")
+ self.assertEqual(
+ result,
+ [("node1", (0, None, None))],
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_255_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(255, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (0, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (1, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_255_exit_code(self, mock_prun: mock.MagicMock):
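+ # an ssh transport failure (exit status 255) is reported by prun as an
+ # SSHError, which parallax_run surfaces as ValueError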
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.SSHError("alice", "node1", "foo")
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_run(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": "/opt/node1/file.c"}
+ results = crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ self.assertListEqual([("node1", "/opt/node1/file.c")], results)
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp_exception(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo")}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": None, "node2": None}
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy_exception(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo"), "node2": None}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
diff --git a/test/unittests/test_parse.py b/test/unittests/test_parse.py
new file mode 100644
index 0000000..27b26b9
--- /dev/null
+++ b/test/unittests/test_parse.py
@@ -0,0 +1,749 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parse.py
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from builtins import zip
+from crmsh import parse
+import unittest
+import shlex
+from crmsh.utils import lines2cli
+from crmsh.xmlutil import xml_tostring
+from lxml import etree
+
+
+def test_score_to_kind():
+ assert parse.score_to_kind("0") == "Optional"
+ assert parse.score_to_kind("INFINITY") == "Mandatory"
+ assert parse.score_to_kind("200") == "Mandatory"
+
+
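+# MockValidation pins the lookups that parse.Validation would otherwise
+# resolve dynamically (roles, actions, order kinds, op attributes) to
+# fixed values, keeping these parser tests self-contained.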
+class MockValidation(parse.Validation):
+ def resource_roles(self):
+ return ['Master', 'Slave', 'Started']
+
+ def resource_actions(self):
+ return ['start', 'stop', 'promote', 'demote']
+
+ def date_ops(self):
+ return ['lt', 'gt', 'in_range', 'date_spec']
+
+ def expression_types(self):
+ return ['normal', 'string', 'number']
+
+ def rsc_order_kinds(self):
+ return ['Mandatory', 'Optional', 'Serialize']
+
+ def op_attributes(self):
+ return ['id', 'name', 'interval', 'timeout', 'description',
+ 'start-delay', 'interval-origin', 'timeout', 'enabled',
+ 'record-pending', 'role', 'requires', 'on-fail']
+
+ def acl_2_0(self):
+ return True
+
+
+class TestBaseParser(unittest.TestCase):
+ def setUp(self):
+ self.base = parse.BaseParser()
+
+ def _reset(self, cmd):
+ self.base._cmd = shlex.split(cmd)
+ self.base._currtok = 0
+
+ @mock.patch('logging.Logger.error')
+ def test_err(self, mock_err):
+ self._reset('a:b:c:d')
+
+ def runner():
+ self.base.match_split()
+ self.assertRaises(parse.ParseError, runner)
+
+ @mock.patch('logging.Logger.error')
+ def test_idspec(self, mock_error):
+ self._reset('$id=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ self._reset('$id-ref=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id-ref')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ def runner():
+ self._reset('id=foo')
+ self.base.match_idspec()
+ self.assertRaises(parse.ParseError, runner)
+
+ def test_match_split(self):
+ self._reset('resource:role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'resource')
+ self.assertEqual(b, 'role')
+
+ self._reset('role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'role')
+ self.assertEqual(b, None)
+
+ def test_description(self):
+ self._reset('description="this is a description"')
+ self.assertEqual(self.base.try_match_description(), 'this is a description')
+
+ def test_nvpairs(self):
+ self._reset('foo=bar wiz="fizz buzz" bug= bug2=')
+ ret = self.base.match_nvpairs()
+ self.assertEqual(len(ret), 4)
+ retdict = dict([(r.get('name'), r.get('value')) for r in ret])
+ self.assertEqual(retdict['foo'], 'bar')
+ self.assertEqual(retdict['bug'], '')
+ self.assertEqual(retdict['wiz'], 'fizz buzz')
+
+
+class TestCliParser(unittest.TestCase):
+ def setUp(self):
+ parse.validator = MockValidation()
+ self.comments = []
+
+ def _parse(self, s):
+ return parse.parse(s, comments=self.comments)
+
+ @mock.patch('logging.Logger.error')
+ def test_node(self, mock_error):
+ out = self._parse('node node-1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node 1: node-1')
+ self.assertEqual(out.get('id'), '1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node testid: node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1:ping')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+ self.assertEqual(out.get('type'), 'ping')
+
+ out = self._parse('node node-1:unknown')
+ self.assertFalse(out)
+
+ out = self._parse('node node-1 description="foo bar" attributes foo=bar')
+ self.assertEqual(out.get('description'), 'foo bar')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+
+ out = self._parse('node node-1 attributes foo=bar utilization wiz=bang')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('utilization/nvpair[@name="wiz"]/@value'))
+
+ @mock.patch('logging.Logger.error')
+ def test_resources(self, mock_error):
+ out = self._parse('primitive www ocf:heartbeat:apache op monitor timeout=10s')
+ self.assertEqual(out.get('id'), 'www')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['monitor'], out.xpath('//op/@name'))
+
+ out = self._parse('rsc_template public_vm ocf:heartbeat:Xen op start timeout=300s op stop timeout=300s op monitor interval=30s timeout=60s op migrate_from timeout=600s op migrate_to timeout=600s')
+ self.assertEqual(out.get('id'), 'public_vm')
+ self.assertEqual(out.get('class'), 'ocf')
+ #print out
+
+ out = self._parse('primitive st stonith:ssh params hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out.get('id'), 'st')
+
+ out2 = self._parse('primitive st stonith:ssh hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out2.get('id'), 'st')
+
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('primitive st stonith:ssh params hostlist= meta')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('primitive st stonith:null params hostlist=node1 meta requires=nothing description="some description here" op start op monitor interval=60m')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('ms m0 resource params a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ print(xml_tostring(out))
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('instance_attributes/nvpair[@name="a"]/@value'))
+
+ out2 = self._parse('ms m0 resource a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('master ma resource meta a=b')
+ self.assertEqual(out.get('id'), 'ma')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('clone clone-1 resource meta a=b')
+ self.assertEqual(out.get('id'), 'clone-1')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('group group-1 a')
+ self.assertEqual(out.get('id'), 'group-1')
+ self.assertEqual(len(out), 1)
+
+ out = self._parse('group group-1 a b c')
+ self.assertEqual(len(out), 3)
+
+ out = self._parse('group group-1')
+ self.assertFalse(out)
+
+ out = self._parse('group group-1 params a=b')
+ self.assertEqual(len(out), 1)
+ self.assertEqual(['b'], out.xpath('/group/instance_attributes/nvpair[@name="a"]/@value'))
+
+ def test_heartbeat_class(self):
+ out = self._parse('primitive p_node-activate heartbeat:node-activate')
+ self.assertEqual(out.get('id'), 'p_node-activate')
+ self.assertEqual(out.get('class'), 'heartbeat')
+ self.assertEqual(out.get('provider'), None)
+ self.assertEqual(out.get('type'), 'node-activate')
+
+
+ def test_nvpair_ref(self):
+ out = self._parse('primitive dummy-0 Dummy params @foo')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['foo'], out.xpath('.//nvpair/@id-ref'))
+
+ out = self._parse('primitive dummy-0 Dummy params @fiz:buz')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['fiz'], out.xpath('.//nvpair/@id-ref'))
+ self.assertEqual(['buz'], out.xpath('.//nvpair/@name'))
+
+ @mock.patch('logging.Logger.error')
+ def test_location(self, mock_error):
+ out = self._parse('location loc-1 resource inf: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('location loc-1 /foo.*/ inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc-pattern'), 'foo.*')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+ #print out
+
+ out = self._parse('location loc-1 // inf: bar')
+ self.assertFalse(out)
+
+ out = self._parse('location loc-1 { one ( two three ) four } inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(['one', 'two', 'three', 'four'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+ #print out
+
+ out = self._parse('location loc-1 thing rule role=slave -inf: #uname eq madrid')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'thing')
+ self.assertEqual(out.get('score'), None)
+
+ out = self._parse('location l { a:foo b:bar }')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_colocation(self, mock_error):
+ out = self._parse('colocation col-1 inf: foo:master ( bar wiz sequential=yes )')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(['foo', 'bar', 'wiz'], out.xpath('//resource_ref/@id'))
+ self.assertEqual([], out.xpath('//resource_set[@name="sequential"]/@value'))
+
+ out = self._parse(
+ 'colocation col-1 -20: foo:Master ( bar wiz ) ( zip zoo ) node-attribute="fiz"')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(out.get('score'), '-20')
+ self.assertEqual(['foo', 'bar', 'wiz', 'zip', 'zoo'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(['fiz'], out.xpath('//@node-attribute'))
+
+ out = self._parse('colocation col-1 0: a:master b')
+ self.assertEqual(out.get('id'), 'col-1')
+
+ out = self._parse('colocation col-1 10: ) bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz ]')
+ self.assertFalse(out)
+
+ def test_order(self):
+ out = self._parse('order o1 Mandatory: [ A B sequential=true ] C')
+ print(xml_tostring(out))
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(['false'], out.xpath('/rsc_order/resource_set/@require-all'))
+ self.assertEqual(['A', 'B', 'C'], out.xpath('//resource_ref/@id'))
+
+ out = self._parse('order o1 Mandatory: [ A B sequential=false ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['require-all', 'false'] in out.resources[0][1])
+ #self.assertTrue(['sequential', 'false'] in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=false')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['sequential', 'false'] in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=true')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['sequential', 'true'] not in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order c_apache_1 Mandatory: apache:start ip_1')
+ self.assertEqual(out.get('id'), 'c_apache_1')
+
+ out = self._parse('order c_apache_2 Mandatory: apache:start ip_1 ip_2 ip_3')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'c_apache_2')
+
+ out = self._parse('order o1 Serialize: A ( B C )')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=false')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['false'], out.xpath('//@symmetrical'))
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=true')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['true'], out.xpath('//@symmetrical'))
+
+ inp = 'colocation rsc_colocation-master INFINITY: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]'
+ out = self._parse(inp)
+ self.assertEqual(2, len(out.xpath('/rsc_colocation/resource_set')))
+ self.assertEqual(out.get('id'), 'rsc_colocation-master')
+
+ out = self._parse('order order_2 Mandatory: [ A B ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'order_2')
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['false'], out.xpath('//resource_set/@sequential'))
+
+ out = self._parse('order order-1 Optional: group1:stop group2:start')
+ self.assertEqual(out.get('id'), 'order-1')
+ self.assertEqual(['Optional'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['group1'], out.xpath('/rsc_order/@first'))
+ self.assertEqual(['stop'], out.xpath('/rsc_order/@first-action'))
+ self.assertEqual(['group2'], out.xpath('/rsc_order/@then'))
+ self.assertEqual(['start'], out.xpath('/rsc_order/@then-action'))
+
+ def test_ticket(self):
+ out = self._parse('rsc_ticket ticket-A_public-ip ticket-A: public-ip')
+ self.assertEqual(out.get('id'), 'ticket-A_public-ip')
+
+ out = self._parse('rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence')
+ self.assertEqual(out.get('id'), 'ticket-A_bigdb')
+
+ out = self._parse(
+ 'rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master')
+ self.assertEqual(out.get('id'), 'ticket-B_storage')
+
+ @mock.patch('logging.Logger.error')
+ def test_bundle(self, mock_error):
+ out = self._parse('bundle httpd docker image=pcmk:httpd replicas=3 network ip-range-start=10.10.10.123 host-netmask=24 port-mapping port=80 storage storage-mapping target-dir=/var/www/html source-dir=/srv/www options=rw primitive httpd-apache')
+ self.assertEqual(out.get('id'), 'httpd')
+ self.assertEqual(['pcmk:httpd'], out.xpath('/bundle/docker/@image'))
+ self.assertEqual(['httpd-apache'], out.xpath('/bundle/crmsh-ref/@id'))
+
+ out = self._parse('bundle httpd docker image=pcmk:httpd primitive httpd-apache apache')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_op(self, mock_error):
+ out = self._parse('monitor apache:Master 10s:20s')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), 'Master')
+ self.assertEqual(out.get('interval'), '10s')
+ self.assertEqual(out.get('timeout'), '20s')
+
+ out = self._parse('monitor apache 60m')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), None)
+ self.assertEqual(out.get('interval'), '60m')
+
+ out = self._parse('primitive rsc_dummy1 Dummy op monitor interval=10 OCF_CHECK_LEVEL=10 timeout=60')
+ # invalid ordering: the instance attribute OCF_CHECK_LEVEL may not appear before the op attribute timeout
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_acl(self, mock_error):
+ out = self._parse('role user-1 error')
+ self.assertFalse(out)
+ out = self._parse('user user-1 role:user-1')
+ self.assertNotEqual(out, False)
+
+ out = self._parse("role bigdb_admin " +
+ "write meta:bigdb:target-role " +
+ "write meta:bigdb:is-managed " +
+ "write location:bigdb " +
+ "read ref:bigdb")
+ self.assertEqual(4, len(out))
+
+ # new-style (ACL 2.0) entries: acl_target and acl_group
+
+ out = self._parse("acl_target foo a")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b c")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse("acl_group fee a b c")
+ self.assertEqual('acl_group', out.tag)
+ self.assertEqual('fee', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse('role fum description="test" read a: description="test2" xpath:*[@name=\\"karl\\"]')
+ self.assertEqual(['*[@name="karl"]'], out.xpath('/acl_role/acl_permission/@xpath'))
+
+ def test_xml(self):
+ out = self._parse('xml <node uname="foo-1"/>')
+ self.assertEqual('node', out.tag)
+ self.assertEqual('foo-1', out.get('uname'))
+
+ @mock.patch('logging.Logger.error')
+ def test_property(self, mock_error):
+ out = self._parse('property stonith-enabled=true')
+ self.assertEqual(['true'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+
+ # missing score
+ out = self._parse('property rule #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['INFINITY'], out.xpath('//@score'))
+
+ out = self._parse('property rule 10: #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['node1'], out.xpath('//expression[@attribute="#uname"]/@value'))
+
+ out = self._parse('property rule +inf: date spec years=2014 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['2014'], out.xpath('//date_spec/@years'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m')
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults foo: failure-timeout=3m')
+ self.assertEqual('foo', out[0].get('id'))
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m foo:')
+ self.assertEqual(False, out)
+
+ def test_empty_property_sets(self):
+ out = self._parse('rsc_defaults defaults:')
+ self.assertEqual('<rsc_defaults><meta_attributes id="defaults"/></rsc_defaults>',
+ xml_tostring(out))
+
+ out = self._parse('op_defaults defaults:')
+ self.assertEqual('<op_defaults><meta_attributes id="defaults"/></op_defaults>',
+ xml_tostring(out))
+
+ def test_fencing(self):
+ # the test environment defines three nodes: ha-one, ha-two and ha-three
+
+ out = self._parse('fencing_topology')
+ expect = '<fencing-topology/>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology poison-pill power')
+ expect = '<fencing-topology><fencing-level target="ha-one" index="1" devices="poison-pill"/><fencing-level target="ha-one" index="2" devices="power"/><fencing-level target="ha-three" index="1" devices="poison-pill"/><fencing-level target="ha-three" index="2" devices="power"/><fencing-level target="ha-two" index="1" devices="poison-pill"/><fencing-level target="ha-two" index="2" devices="power"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology node-a: poison-pill power node-b: ipmi serial')
+ self.assertEqual(4, len(out))
+
+ devs = ['stonith-vbox3-1-off', 'stonith-vbox3-2-off',
+ 'stonith-vbox3-1-on', 'stonith-vbox3-2-on']
+ out = self._parse('fencing_topology vbox4: %s' % ','.join(devs))
+ print(xml_tostring(out))
+ self.assertEqual(1, len(out))
+
+ def test_fencing_1114(self):
+ """
+ Test node attribute fence target assignment
+ """
+ out = self._parse('fencing_topology attr:rack=1 poison-pill power')
+ expect = """<fencing-topology><fencing-level index="1" devices="poison-pill" target-attribute="rack" target-value="1"/><fencing-level index="2" devices="power" target-attribute="rack" target-value="1"/></fencing-topology>"""
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology attr:rack=1 poison-pill,power')
+ expect = '<fencing-topology><fencing-level index="1" devices="poison-pill,power" target-attribute="rack" target-value="1"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ @mock.patch('logging.Logger.error')
+ def test_tag(self, mock_error):
+ out = self._parse('tag tag1: one two three')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['one', 'two', 'three'], out.xpath('/tag/obj_ref/@id'))
+
+ out = self._parse('tag tag1:')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1:: foo')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1 foo bar')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['foo', 'bar'], out.xpath('/tag/obj_ref/@id'))
+
+ def test_alerts(self):
+ "Test alerts (1.1.15+)"
+ out = self._parse('alert alert1 /tmp/foo.sh to /tmp/bar.log')
+ self.assertEqual(out.get('id'), 'alert1')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+
+ def test_alerts_brackets(self):
+ "Test alerts w/ brackets (1.1.15+)"
+ out = self._parse('alert alert2 /tmp/foo.sh to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert2')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+ self.assertEqual(['10s'],
+ out.xpath('/alert/recipient/meta_attributes/nvpair[@name="timeout"]/@value'))
+
+ def test_alerts_selectors(self):
+ "Test alerts w/ selectors (1.1.17+)"
+ out = self._parse('alert alert3 /tmp/foo.sh select nodes fencing attributes { standby shutdown } to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert3')
+ self.assertEqual(1, len(out.xpath('/alert/select/select_nodes')))
+ self.assertEqual(1, len(out.xpath('/alert/select/select_fencing')))
+ self.assertEqual(['standby', 'shutdown'],
+ out.xpath('/alert/select/select_attributes/attribute/@name'))
+
+
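+ # Helper: feed every CLI line to the parser, fail the test on any parse
+ # error and collect the resulting XML elements.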
+ def _parse_lines(self, lines):
+ out = []
+ for line in lines2cli(lines):
+ if line is not None:
+ tmp = self._parse(line.strip())
+ self.assertNotEqual(tmp, False)
+ if tmp is not None:
+ out.append(tmp)
+ return out
+
+ def test_comments(self):
+ outp = self._parse_lines('''
+ # comment
+ node n1
+ ''')
+ self.assertNotEqual(-1, xml_tostring(outp[0]).find('# comment'))
+
+ def test_uppercase(self):
+ outp = self._parse_lines('''
+ PRIMITIVE rsc_dummy ocf:heartbeat:Dummy
+ MONITOR rsc_dummy 30
+ ''')
+ #print outp
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('op', outp[1].tag)
+
+ outp = self._parse_lines('''
+ PRIMITIVE testfs ocf:heartbeat:Filesystem \
+ PARAMS directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ CLONE testfs-clone testfs \
+ META ordered="true" interleave="true"
+ ''')
+ #print outp
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('clone', outp[1].tag)
+
+ out = self._parse('LOCATION loc-1 resource INF: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('NODE node-1 ATTRIBUTES foo=bar UTILIZATION wiz=bang')
+ self.assertEqual('node-1', out.get('uname'))
+ self.assertEqual(['bar'], out.xpath('/node/instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('/node/utilization/nvpair[@name="wiz"]/@value'))
+
+ out = self._parse('PRIMITIVE virtual-ip ocf:heartbeat:IPaddr2 PARAMS ip=192.168.122.13 lvs_support=false OP start timeout=20 interval=0 OP stop timeout=20 interval=0 OP monitor interval=10 timeout=20')
+ self.assertEqual(['192.168.122.13'], out.xpath('//instance_attributes/nvpair[@name="ip"]/@value'))
+
+ out = self._parse('GROUP web-server virtual-ip apache META target-role=Started')
+ self.assertEqual(out.get('id'), 'web-server')
+
+ def test_nvpair_novalue(self):
+ inp = """primitive stonith_ipmi-karl stonith:fence_ipmilan \
+ params pcmk_host_list=karl verbose action=reboot \
+ ipaddr=10.43.242.221 login=root passwd=dummy method=onoff \
+ op start interval=0 timeout=60 \
+ op stop interval=0 timeout=60 \
+ op monitor interval=600 timeout=60 \
+ meta target-role=Started"""
+
+ outp = self._parse_lines(inp)
+ self.assertEqual(len(outp), 1)
+ self.assertEqual('primitive', outp[0].tag)
+ # print xml_tostring(outp[0])
+ verbose = outp[0].xpath('//nvpair[@name="verbose"]')
+ self.assertEqual(len(verbose), 1)
+ self.assertTrue('value' not in verbose[0].attrib)
+
+ @mock.patch('logging.Logger.error')
+ def test_configs(self, mock_error):
+ outp = self._parse_lines('''
+ primitive rsc_dummy ocf:heartbeat:Dummy
+ monitor rsc_dummy 30
+ ''')
+ #print outp
+ self.assertEqual(2, len(outp))
+
+ outp = self._parse_lines('''
+ primitive testfs ocf:heartbeat:Filesystem \
+ params directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+ ''')
+ #print outp
+ self.assertEqual(2, len(outp))
+
+ inp = [
+ """node node1 attributes mem=16G""",
+ """node node2 utilization cpu=4""",
+ """primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires="nothing" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s""",
+ """primitive st2 stonith:ssh \
+ params hostlist='node1 node2'""",
+ """primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10""",
+ """monitor d1 60s:30s""",
+ """primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s""",
+ """monitor d2:Started 60s:30s""",
+ """group g1 d1 d2""",
+ """primitive d3 ocf:pacemaker:Dummy""",
+ """clone c d3 \
+ meta clone-max=1""",
+ """primitive d4 ocf:pacemaker:Dummy""",
+ """ms m d4""",
+ """primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops""",
+ """primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1""",
+ """ms m5 s5""",
+ """ms m6 s6""",
+ """location l1 g1 100: node1""",
+ """location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1""",
+ """location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0""",
+ """location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0""",
+ """location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date date_spec years="2009" hours=09-17""",
+ """location l6 m5 \
+ rule $id-ref=l2-rule1""",
+ """location l7 m5 \
+ rule $id-ref=l2""",
+ """collocation c1 inf: m6 m5""",
+ """collocation c2 inf: m5:Master d1:Started""",
+ """order o1 Mandatory: m5 m6""",
+ """order o2 Optional: d1:start m5:promote""",
+ """order o3 Serialize: m5 m6""",
+ """order o4 inf: m5 m6""",
+ """rsc_ticket ticket-A_m6 ticket-A: m6""",
+ """rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence""",
+ """rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence""",
+ """fencing_topology st st2""",
+ """property stonith-enabled=true""",
+ """property $id=cpset2 maintenance-mode=true""",
+ """rsc_defaults failure-timeout=10m""",
+ """op_defaults $id=opsdef2 record-pending=true"""]
+
+ outp = self._parse_lines('\n'.join(inp))
+ a = [xml_tostring(x) for x in outp]
+ b = [
+ '<node uname="node1"><instance_attributes><nvpair name="mem" value="16G"/></instance_attributes></node>',
+ '<node uname="node2"><utilization><nvpair name="cpu" value="4"/></utilization></node>',
+ '<primitive id="st" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes><meta_attributes><nvpair name="target-role" value="Started"/><nvpair name="requires" value="nothing"/></meta_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="monitor" interval="60m" timeout="60s"/></operations></primitive>',
+ '<primitive id="st2" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes></primitive>',
+ '<primitive id="d1" class="ocf" provider="pacemaker" type="Dummy"><operations id="d1-ops"><op name="monitor" interval="60m"/><op name="monitor" interval="120m"><instance_attributes><nvpair name="OCF_CHECK_LEVEL" value="10"/></instance_attributes></op></operations></primitive>',
+ '<op name="monitor" rsc="d1" interval="60s" timeout="30s"/>',
+ '<primitive id="d2" class="ocf" provider="heartbeat" type="Delay"><instance_attributes><nvpair name="mondelay" value="60"/></instance_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="stop" timeout="60s" interval="0s"/></operations></primitive>',
+ '<op name="monitor" role="Started" rsc="d2" interval="60s" timeout="30s"/>',
+ '<group id="g1"><crmsh-ref id="d1"/><crmsh-ref id="d2"/></group>',
+ '<primitive id="d3" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<clone id="c"><meta_attributes><nvpair name="clone-max" value="1"/></meta_attributes><crmsh-ref id="d3"/></clone>',
+ '<primitive id="d4" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<master id="m"><crmsh-ref id="d4"/></master>',
+ '<primitive id="s5" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1-ops"/></primitive>',
+ '<primitive id="s6" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1"/></primitive>',
+ '<master id="m5"><crmsh-ref id="s5"/></master>',
+ '<master id="m6"><crmsh-ref id="s6"/></master>',
+ '<rsc_location id="l1" rsc="g1" score="100" node="node1"/>',
+ '<rsc_location id="l2" rsc="c"><rule id="l2-rule1" score="100"><expression operation="eq" attribute="#uname" value="node1"/></rule></rsc_location>',
+ '<rsc_location id="l3" rsc="m5"><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l4" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l5" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule><rule score="INFINITY"><date_expression operation="lt" end="2009-05-26"/><date_expression operation="in_range" start="2009-05-26" end="2009-07-26"/><date_expression operation="in_range" start="2009-05-26"><duration years="2009"/></date_expression><date_expression operation="date_spec"><date_spec years="2009" hours="09-17"/></date_expression></rule></rsc_location>',
+ '<rsc_location id="l6" rsc="m5"><rule id-ref="l2-rule1"/></rsc_location>',
+ '<rsc_location id="l7" rsc="m5"><rule id-ref="l2"/></rsc_location>',
+ '<rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>',
+ '<rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>',
+ '<rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>',
+ '<rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>',
+ '<rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>',
+ '<rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence"><resource_set><resource_ref id="m6"/><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence"><resource_set><resource_ref id="m6"/></resource_set><resource_set role="Master"><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<fencing-topology><fencing-level target="ha-one" index="1" devices="st"/><fencing-level target="ha-one" index="2" devices="st2"/><fencing-level target="ha-three" index="1" devices="st"/><fencing-level target="ha-three" index="2" devices="st2"/><fencing-level target="ha-two" index="1" devices="st"/><fencing-level target="ha-two" index="2" devices="st2"/></fencing-topology>',
+ '<cluster_property_set><nvpair name="stonith-enabled" value="true"/></cluster_property_set>',
+ '<cluster_property_set id="cpset2"><nvpair name="maintenance-mode" value="true"/></cluster_property_set>',
+ '<rsc_defaults><meta_attributes><nvpair name="failure-timeout" value="10m"/></meta_attributes></rsc_defaults>',
+ '<op_defaults><meta_attributes id="opsdef2"><nvpair name="record-pending" value="true"/></meta_attributes></op_defaults>',
+ ]
+
+ for result, expected in zip(a, b):
+ self.maxDiff = None
+ self.assertEqual(expected, result)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_prun.py b/test/unittests/test_prun.py
new file mode 100644
index 0000000..7e987bf
--- /dev/null
+++ b/test/unittests/test_prun.py
@@ -0,0 +1,157 @@
+import typing
+
+import crmsh.constants
+import crmsh.prun.prun
+import crmsh.prun.runner
+
+import unittest
+from unittest import mock
+
+
+class TestPrun(unittest.TestCase):
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "alice", "bob"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'bob'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'bob'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+ self.assertTrue(isinstance(results, typing.Dict))
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_root(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "root", "root"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_geteuid.assert_not_called()
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'root'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+ self.assertTrue(isinstance(results, typing.Dict))
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_localhost(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo"}
+ # the target is the local host, so no ssh user pair should be looked up
+ mock_is_local_host.return_value = True
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_not_called()
+ mock_is_local_host.assert_called_once_with('host1')
+ mock_runner_add_task.assert_called_once_with(
+ TaskArgumentsEq(
+ ['/bin/sh'],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )
+ )
+ mock_runner_run.assert_called_once()
+ self.assertTrue(isinstance(results, typing.Dict))
+ self.assertSetEqual({"host1"}, set(results.keys()))
+
+
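+# Equality helper: compares only the Task fields these tests care about,
+# so mock assertions can match Runner.add_task arguments structurally.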
+class TaskArgumentsEq(crmsh.prun.runner.Task):
+ def __eq__(self, other):
+ if not isinstance(other, crmsh.prun.runner.Task):
+ return False
+ return self.args == other.args \
+ and self.input == other.input \
+ and self.stdout_config == other.stdout_config \
+ and self.stderr_config == other.stderr_config \
+ and self.context == other.context
diff --git a/test/unittests/test_qdevice.py b/test/unittests/test_qdevice.py
new file mode 100644
index 0000000..f6b2f13
--- /dev/null
+++ b/test/unittests/test_qdevice.py
@@ -0,0 +1,1031 @@
+import os
+import unittest
+import socket
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+from crmsh import qdevice, lock
+
+
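+# Canned corosync.conf fixtures shipped alongside the unit tests.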
+F2 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.2')).read()
+F4 = open(os.path.join(os.path.dirname(__file__), 'corosync.conf.3')).read()
+
+
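+# evaluate_qdevice_quorum_effect decides between restarting corosync,
+# reloading it, or postponing the restart, based on the projected quorum
+# state and whether any resources are currently running.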
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_restart(mock_get_dict, mock_quorate):
+ mock_get_dict.return_value = {'Expected': '1', 'Total': '1'}
+ mock_quorate.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD, False, False)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_reload(mock_get_dict, mock_quorate):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(3, 2)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_later(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
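+    # Decorate a no-op function so only the locking wrapper's behaviour is exercised.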
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_claim_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.ClaimLockError
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_fatal.assert_called_once_with("Duplicated cluster name \"cluster1\"!")
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_ssh_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+class TestQDevice(unittest.TestCase):
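+    """Unit tests for the QDevice helper: qnetd/qdevice certificates, corosync configuration and service handling."""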
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # Use the setup to create a fresh instance for each test
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+ self.qdevice_with_hostname = qdevice.QDevice("node.qnetd")
+ self.qdevice_with_invalid_port = qdevice.QDevice("10.10.10.123", port=100)
+ self.qdevice_with_invalid_tie_breaker = qdevice.QDevice("10.10.10.123", tie_breaker="wrong")
+ self.qdevice_with_ip_cluster_node = qdevice.QDevice("10.10.10.123", cluster_node="node1.com")
+ self.qdevice_with_invalid_cmds_relative_path = qdevice.QDevice("10.10.10.123", cmds="ls")
+ self.qdevice_with_invalid_cmds_not_exist = qdevice.QDevice("10.10.10.123", cmds="/not_exist")
+ self.qdevice_with_cluster_name = qdevice.QDevice("10.10.10.123", cluster_name="hacluster1")
+ self.qdevice_with_stage_cluster_name = qdevice.QDevice("10.10.10.123", is_stage=True, cluster_name="cluster1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_qnetd_cacert_on_local(self):
+ res = self.qdevice_with_ip.qnetd_cacert_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt")
+
+ def test_qnetd_cacert_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qnetd_cacert_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt")
+
+ def test_qdevice_crq_on_qnetd(self):
+ res = self.qdevice_with_cluster_name.qdevice_crq_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.hacluster1")
+
+ def test_qdevice_crq_on_local(self):
+ res = self.qdevice_with_ip.qdevice_crq_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq")
+
+ def test_qnetd_cluster_crt_on_qnetd(self):
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/cluster-None.crt")
+
+ @mock.patch('os.path.basename')
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ def test_qnetd_cluster_crt_on_local(self, mock_qnetd_crt, mock_basename):
+ mock_qnetd_crt.return_value = "crt_file"
+ mock_basename.return_value = "crt_file"
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/crt_file")
+
+ def test_qdevice_p12_on_local(self):
+ res = self.qdevice_with_ip.qdevice_p12_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12")
+
+ def test_qdevice_p12_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qdevice_p12_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12")
+
+ @mock.patch('crmsh.utils.check_port_open')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_port_error(self, mock_getaddrinfo, mock_ping, mock_in_local, mock_check):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = False
+ mock_check.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "ssh service on \"qnetd-node\" not available"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_local(self, mock_getaddrinfo, mock_ping, mock_in_local):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = True
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "host for qnetd must be a remote one"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr(self, mock_getaddrinfo):
+ mock_getaddrinfo.side_effect = socket.error
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+ excepted_err_string = "host \"qnetd-node\" is unreachable"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.valid_port')
+ def test_check_qdevice_port(self, mock_port):
+ mock_port.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_port("1")
+ excepted_err_string = "invalid qdevice port range(1024 - 65535)"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_algo(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_algo("1")
+ excepted_err_string = "invalid ALGORITHM choice: '1' (choose from 'ffsplit', 'lms')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_check_qdevice_tie_breaker(self, mock_is_active):
+ mock_is_active.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tie_breaker("1")
+ excepted_err_string = "invalid qdevice tie_breaker(lowest/highest/valid_node_id)"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_tls(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tls("1")
+ excepted_err_string = "invalid TLS choice: '1' (choose from 'on', 'off', 'required')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_hm(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics_mode("1")
+ excepted_err_string = "invalid MODE choice: '1' (choose from 'on', 'sync', 'off')"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ def test_check_qdevice_he_path_error(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("command1")
+ excepted_err_string = "commands for heuristics should be absolute path"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('os.path.exists')
+    def test_check_qdevice_he_not_exist_error(self, mock_exists):
+ mock_exists.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("/usr/bin/testst")
+ excepted_err_string = "command /usr/bin/testst not exist"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_check_package_installed(self, mock_installed):
+ mock_installed.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_package_installed("corosync-qdevice")
+ excepted_err_string = "Package \"corosync-qdevice\" not installed on this node"
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics_mode')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tls')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tie_breaker')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_algo')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_port')
+ @mock.patch('crmsh.qdevice.QDevice.check_qnetd_addr')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ def test_valid_qdevice_options(self, mock_installed, mock_check_qnetd, mock_check_port,
+ mock_check_algo, mock_check_tie, mock_check_tls, mock_check_h, mock_check_hm):
+ self.qdevice_with_ip.valid_qdevice_options()
+ mock_installed.assert_called_once_with("corosync-qdevice")
+ mock_check_qnetd.assert_called_once_with("10.10.10.123")
+
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_not_installed(self, mock_installed):
+ self.qdevice_with_ip.qnetd_ip = "10.10.10.123"
+ mock_installed.return_value = False
+ excepted_err_string = 'Package "corosync-qnetd" not installed on 10.10.10.123!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, install "corosync-qnetd" on 10.10.10.123.\nThen run command "crm cluster init" with "qdevice" stage, like:\n crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately.'
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_ip.valid_qnetd()
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_with_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = True
+ mock_run.return_value = "data"
+ excepted_err_string = "This cluster's name \"cluster1\" already exists on qnetd server!\nPlease consider to use the different cluster-name property."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_stage_cluster_name.valid_qnetd()
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("corosync-qnetd-tool -l -c cluster1", "10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_without_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = False
+ excepted_err_string = "This cluster's name \"hacluster1\" already exists on qnetd server!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, consider to use the different cluster-name property.\nThen run command \"crm cluster init\" with \"qdevice\" stage, like:\n crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_cluster_name.valid_qnetd()
+ self.assertEqual(excepted_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("test -f /etc/corosync/qnetd/nssdb/cluster-hacluster1.crt", "10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.enable_service")
+ def test_enable_qnetd(self, mock_enable):
+ self.qdevice_with_ip.enable_qnetd()
+ mock_enable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.disable_service")
+ def test_disable_qnetd(self, mock_disable):
+ self.qdevice_with_ip.disable_qnetd()
+ mock_disable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.start_service")
+ def test_start_qnetd(self, mock_start):
+ self.qdevice_with_ip.start_qnetd()
+ mock_start.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.stop_service")
+ def test_stop_qnetd(self, mock_stop):
+ self.qdevice_with_ip.stop_qnetd()
+ mock_stop.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd_already_exists(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.return_value = [("10.10.10.123", (0, None, None))]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
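+        # Call the undecorated implementation via __wrapped__ so the decorator applied to init_db_on_qnetd is bypassed.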
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "test -f {}".format(mock_qnetd_cacert.return_value))
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.side_effect = [ValueError(mock.Mock(), "Failed on 10.10.10.123: error happen"),
+ [("10.10.10.123", (0, None, None))]]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
+
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+
+ mock_call.assert_has_calls([
+ mock.call(["10.10.10.123"], "test -f {}".format(mock_qnetd_cacert.return_value)),
+ mock.call(["10.10.10.123"], "corosync-qnetd-certutil -i")
+ ])
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 1: Initialize database on 10.10.10.123",
+ 'corosync-qnetd-certutil -i')
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd_exist(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = True
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 2: Fetch qnetd-cacert.crt from 10.10.10.123")
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_qnetd_crt_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("os.path.dirname")
+ def test_copy_qnetd_crt_to_cluster(self, mock_dirname, mock_qnetd_cacert_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_dirname.return_value = "/etc/corosync/qdevice/net/10.10.10.123"
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 3: Copy exported qnetd-cacert.crt to ['node2.com']")
+ mock_copy.assert_called_once_with(["node2.com"], mock_dirname.return_value,
+ "/etc/corosync/qdevice/net", True)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ def test_init_db_on_cluster(self, mock_list_nodes, mock_qnetd_cacert_local, mock_call, mock_log):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_call.return_value = [("node1", (0, None, None)), ("node2", (0, None, None))]
+
+ self.qdevice_with_ip.init_db_on_cluster()
+
+ mock_list_nodes.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 4: Initialize database on ['node1', 'node2']",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt')
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+ "corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ def test_create_ca_request(self, mock_stdout_stderr, mock_log):
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_cluster_name.create_ca_request()
+
+ mock_log.assert_called_once_with("Step 5: Generate certificate request qdevice-net-node.crq",
+ 'corosync-qdevice-net-certutil -r -n hacluster1')
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -r -n hacluster1")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_crq_to_qnetd(self, mock_copy, mock_qdevice_crq_local,
+ mock_qdevice_crq_qnetd, mock_log):
+ mock_qdevice_crq_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq"
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+
+ self.qdevice_with_ip.copy_crq_to_qnetd()
+
+ mock_log.assert_called_once_with("Step 6: Copy qdevice-net-node.crq to 10.10.10.123")
+ mock_copy.assert_called_once_with(["10.10.10.123"], mock_qdevice_crq_local.return_value,
+ mock_qdevice_crq_qnetd.return_value, False)
+ mock_qdevice_crq_local.assert_called_once_with()
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ def test_sign_crq_on_qnetd(self, mock_qdevice_crq_qnetd, mock_call, mock_log):
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+ mock_call.return_value = ["10.10.10.123", (0, None, None)]
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.sign_crq_on_qnetd()
+
+ mock_log.assert_called_once_with("Step 7: Sign and export cluster certificate on 10.10.10.123",
+ 'corosync-qnetd-certutil -s -c /etc/corosync/qnetd/nssdb/qdevice-net-node.crq -n hacluster')
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "corosync-qnetd-certutil -s -c {} -n hacluster".format(mock_qdevice_crq_qnetd.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_cluster_crt_from_qnetd(self, mock_parallax_slurp, mock_crt_on_qnetd, mock_log):
+ mock_crt_on_qnetd.return_value = "/etc/corosync/qnetd/nssdb/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.fetch_cluster_crt_from_qnetd()
+
+ mock_log.assert_called_once_with("Step 8: Fetch cluster-hacluster.crt from 10.10.10.123")
+ mock_crt_on_qnetd.assert_has_calls([mock.call(), mock.call()])
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", mock_crt_on_qnetd.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_local", new_callable=mock.PropertyMock)
+ def test_import_cluster_crt(self, mock_crt_on_local, mock_stdout_stderr, mock_log):
+ mock_crt_on_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.import_cluster_crt()
+
+ mock_log.assert_called_once_with("Step 9: Import certificate file cluster-hacluster.crt on local",
+ 'corosync-qdevice-net-certutil -M -c /etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt')
+ mock_crt_on_local.assert_has_calls([mock.call(), mock.call()])
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -M -c {}".format(mock_crt_on_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_p12_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ def test_copy_p12_to_cluster(self, mock_p12_on_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_called_once_with("Step 10: Copy qdevice-net-node.p12 to ['node2.com']")
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_called_once_with(["node2.com"], mock_p12_on_local.return_value,
+ mock_p12_on_local.return_value, False)
+ mock_p12_on_local.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster_one_node(self, mock_list_nodes, mock_call, mock_log):
+ mock_list_nodes.return_value = []
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_not_called()
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_not_called()
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster(self, mock_list_nodes, mock_p12_on_local, mock_log, mock_call):
+ mock_list_nodes.return_value = ["node2", "node3"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+ mock_call.return_value = [("node2", (0, None, None)), ("node3", (0, None, None))]
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_called_once_with("Step 11: Import qdevice-net-node.p12 on ['node2', 'node3']",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(
+ ["node2", "node3"],
+ "corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_local.return_value))
+ mock_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_p12_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.import_cluster_crt")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_cluster_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.sign_crq_on_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.copy_crq_to_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.create_ca_request")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_qnetd_crt_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_qnetd")
+ def test_certificate_process_on_init(self, mock_init_db_on_qnetd, mock_fetch_qnetd_crt_from_qnetd,
+ mock_copy_qnetd_crt_to_cluster, mock_init_db_on_cluster, mock_create_ca_request,
+ mock_copy_crq_to_qnetd, mock_sign_crq_on_qnetd, mock_fetch_cluster_crt_from_qnetd,
+ mock_import_cluster_crt, mock_copy_p12_to_cluster, mock_import_p12_on_cluster):
+
+ self.qdevice_with_ip.certificate_process_on_init()
+ mock_init_db_on_qnetd.assert_called_once_with()
+ mock_fetch_qnetd_crt_from_qnetd.assert_called_once_with()
+ mock_copy_qnetd_crt_to_cluster.assert_called_once_with()
+ mock_init_db_on_cluster.assert_called_once_with()
+ mock_create_ca_request.assert_called_once_with()
+ mock_copy_crq_to_qnetd.assert_called_once_with()
+ mock_sign_crq_on_qnetd.assert_called_once_with()
+ mock_fetch_cluster_crt_from_qnetd.assert_called_once_with()
+ mock_import_cluster_crt.assert_called_once_with()
+ mock_copy_p12_to_cluster.assert_called_once_with()
+ mock_import_p12_on_cluster.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster_exist(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_called_once_with("Step 1: Fetch qnetd-cacert.crt from node1.com")
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], "/etc/corosync/qdevice/net", mock_qnetd_cacert_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ def test_init_db_on_local(self, mock_qnetd_cacert_cluster, mock_stdout_stderr, mock_log):
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_ip_cluster_node.init_db_on_local()
+
+ mock_log.assert_called_once_with("Step 2: Initialize database on local",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt')
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster_exist(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_called_once_with("Step 3: Fetch qdevice-net-node.p12 from node1.com")
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], '/etc/corosync/qdevice/net', mock_p12_on_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ def test_import_p12_on_local(self, mock_p12_on_cluster, mock_stdout_stderr, mock_log):
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.import_p12_on_local()
+
+ mock_log.assert_called_once_with("Step 4: Import cluster certificate and key",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12')
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_p12_from_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_cluster")
+ def test_certificate_process_on_join(self, mock_fetch_qnetd_crt_from_cluster, mock_init_db_on_local,
+ mock_fetch_p12_from_cluster, mock_import_p12_on_local):
+ self.qdevice_with_ip.certificate_process_on_join()
+ mock_fetch_qnetd_crt_from_cluster.assert_called_once_with()
+ mock_init_db_on_local.assert_called_once_with()
+ mock_fetch_p12_from_cluster.assert_called_once_with()
+ mock_import_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_write_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_mksection, mock_str2file):
+ mock_mksection.side_effect = [
+ ["device {", "}"],
+ ["net {", "}"]
+ ]
+ mock_read_file.return_value = "data"
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.write_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.add.assert_has_calls([
+ mock.call('quorum', ["device {", "}"]),
+ mock.call('quorum.device', ["net {", "}"])
+ ])
+ mock_instance.set.assert_has_calls([
+ mock.call('quorum.device.votes', '1'),
+ mock.call('quorum.device.model', 'net'),
+ mock.call('quorum.device.net.tls', 'on'),
+ mock.call('quorum.device.net.host', '10.10.10.123'),
+ mock.call('quorum.device.net.port', 5403),
+ mock.call('quorum.device.net.algorithm', 'ffsplit'),
+ mock.call('quorum.device.net.tie_breaker', 'lowest')
+ ])
+ mock_instance.to_string.assert_called_once_with()
+ mock_mksection.assert_has_calls([
+ mock.call('quorum.device', []),
+ mock.call('quorum.device.net', [])
+ ])
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_remove_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_str2file):
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.remove_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db_not_exist(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_not_called()
+ mock_call.assert_not_called()
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = True
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_call.return_value = [("node1.com", (0, None, None)), ("node2.com", (0, None, None))]
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+            'rm -rf /etc/corosync/qdevice/net/*')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_qdevice_vote(self, mock_run, mock_get_value, mock_warning):
+ data = """
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 A,V,NMW 192.168.122.221 (local)
+ 0 0 Qdevice
+ """
+ mock_run.return_value = data
+ mock_get_value.return_value = "qnetd-node"
+ qdevice.QDevice.check_qdevice_vote()
+ mock_run.assert_called_once_with("corosync-quorumtool -s", success_exit_status={0, 2})
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_warning.assert_called_once_with("Qdevice's vote is 0, which simply means Qdevice can't talk to Qnetd(qnetd-node) for various reasons.")
+
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ def test_config_and_start_qdevice(self, mock_rm_db, mock_status_long, mock_evaluate):
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.certificate_process_on_init = mock.Mock()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice = mock.Mock()
+ self.qdevice_with_ip.config_qdevice = mock.Mock()
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
+
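+        # Call the undecorated method via __wrapped__ so its decorator is skipped in this unit test.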
+ self.qdevice_with_ip.config_and_start_qdevice.__wrapped__(self.qdevice_with_ip)
+
+ mock_rm_db.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Qdevice certification process")
+ self.qdevice_with_ip.certificate_process_on_init.assert_called_once_with()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.config_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ def test_adjust_sbd_watchdog_timeout_with_qdevice(self, mock_check_reachable, mock_using_diskless_sbd, mock_get_sbd_value, mock_update_config, mock_get_timeout, mock_set_property):
+ mock_using_diskless_sbd.return_value = True
+ mock_get_sbd_value.return_value = ""
+ mock_get_timeout.return_value = 100
+
+ self.qdevice_with_stage_cluster_name.adjust_sbd_watchdog_timeout_with_qdevice()
+
+ mock_check_reachable.assert_called_once_with()
+ mock_using_diskless_sbd.assert_called_once_with()
+ mock_get_sbd_value.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+ mock_update_config.assert_called_once_with({"SBD_WATCHDOG_TIMEOUT": str(sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE)})
+ mock_set_property.assert_called_once_with("stonith-timeout", 100)
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_reload(self, mock_status, mock_cluster_run, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Starting corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("systemctl restart corosync-qdevice")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_restart(self, mock_status, mock_cluster_run, mock_wait, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Restarting cluster service"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_wait.assert_called_once_with()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("crm cluster restart")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_warn(self, mock_status, mock_cluster_run, mock_warn, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ ])
+ mock_warn.assert_called_once_with("To use qdevice service, need to restart cluster service manually on each node")
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.qdevice.QDevice.write_qdevice_config')
+ def test_config_qdevice(self, mock_write, mock_is_unicast, mock_add_nodelist, mock_status_long,
+ mock_update_votes, mock_run):
+ mock_is_unicast.return_value = False
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.config_qdevice()
+
+ mock_write.assert_called_once_with()
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Update configuration")
+ mock_update_votes.assert_called_once_with()
+ mock_run.assert_called_once_with("crm corosync reload")
+
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd_return(self, mock_configured):
+ mock_configured.return_value = False
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd(self, mock_configured, mock_get_value, mock_run):
+ mock_configured.return_value = True
+ mock_get_value.side_effect = ["qnetd-node", "cluster1"]
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("quorum.device.net.host"),
+ mock.call("totem.cluster_name")])
+ crt_file = "/etc/corosync/qnetd/nssdb/cluster-cluster1.crt"
+ crt_cmd = "test -f {crt_file} && rm -f {crt_file}".format(crt_file=crt_file)
+ crq_file = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.cluster1"
+ crq_cmd = "test -f {crq_file} && rm -f {crq_file}".format(crq_file=crq_file)
+ mock_run.assert_has_calls([
+ mock.call(crt_cmd, "qnetd-node"),
+ mock.call(crq_cmd, "qnetd-node")])
diff --git a/test/unittests/test_ratrace.py b/test/unittests/test_ratrace.py
new file mode 100644
index 0000000..6734b89
--- /dev/null
+++ b/test/unittests/test_ratrace.py
@@ -0,0 +1,131 @@
+import unittest
+from lxml import etree
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import cibconfig
+from crmsh.ui_context import Context
+from crmsh.ui_resource import RscMgmt
+from crmsh.ui_root import Root
+
+
+class TestRATrace(unittest.TestCase):
+ """Unit tests for enabling/disabling RA tracing."""
+
+ context = Context(Root())
+ factory = cibconfig.cib_factory
+
+ def setUp(self):
+ self.factory._push_state()
+
+ def tearDown(self):
+ self.factory._pop_state()
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_resource(self, mock_error):
+ """Check setting RA tracing for a resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy"/>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the resource.
+ RscMgmt()._trace_resource(self.context, obj.obj_id, obj, '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0', 'r1-stop-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-stop-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the resource.
+ RscMgmt()._untrace_resource(self.context, obj.obj_id, obj)
+ self.assertEqual(obj.node.xpath('operations/op/@id'), [])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op(self, mock_error):
+ """Check setting RA tracing for a specific operation."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'invalid-op')
+ self.assertEqual(str(err.exception), "Operation invalid-op not found in r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_new(self, mock_error):
+ """Check setting RA tracing for an operation that is not in CIB."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace a regular operation that is not yet defined in CIB. The request
+ # should succeed and introduce an op node for the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'start', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Try tracing the monitor operation in the same way. The request should
+ # get rejected because no explicit interval is specified.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(str(err.exception), "No monitor operation configured for r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_stateful(self, mock_error):
+ """Check setting RA tracing for an operation on a stateful resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor" role="Master"/>
+ <op id="r1-monitor-11" interval="11" name="monitor" role="Slave"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-11"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_interval(self, mock_error):
+ """Check setting RA tracing for an operation+interval."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'invalid-op', '10')
+ self.assertEqual(str(err.exception), "Operation invalid-op with interval 10 not found in r1")
diff --git a/test/unittests/test_report_collect.py b/test/unittests/test_report_collect.py
new file mode 100644
index 0000000..faec951
--- /dev/null
+++ b/test/unittests/test_report_collect.py
@@ -0,0 +1,588 @@
+from subprocess import TimeoutExpired
+from crmsh.report import collect, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestCollect(unittest.TestCase):
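+    """Unit tests for crmsh.report.collect: locating and collecting cluster log files."""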
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log_no_config(self, mock_isfile, mock_warning):
+ mock_isfile.side_effect = [False, False, False]
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log"),
+ mock.call("/var/log/pacemaker.log")
+ ])
+ mock_warning.assert_called_once_with("No valid pacemaker log file found")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log(self, mock_isfile, mock_read, mock_warning):
+ mock_isfile.return_value = True
+ mock_read.return_value = """
+# has been enabled, those as well). This log is of more use to developers and
+# advanced system administrators, and when reporting problems.
+PCMK_logfile=/var/log/pacemaker/pacemaker.log
+
+# Set the permissions on the above log file to owner/group read/write
+ """
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "/var/log/pacemaker/pacemaker.log")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log")
+ ])
+ mock_read.assert_called_once_with(constants.PCMKCONF)
+
+ @mock.patch('crmsh.report.utils.dump_logset')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.collect.get_pcmk_log')
+ @mock.patch('crmsh.report.collect.get_corosync_log')
+ def test_collect_ha_logs(self, mock_corosync_log, mock_get_log, mock_isfile, mock_dump):
+ mock_corosync_log.return_value = "/var/log/cluster/corosync.log"
+ mock_get_log.return_value = "/var/pacemaker.log"
+ mock_isfile.side_effect = [True, True]
+ mock_ctx_inst = mock.Mock(extra_log_list=[])
+
+ collect.collect_ha_logs(mock_ctx_inst)
+
+ mock_get_log.assert_called_once_with()
+ mock_isfile.assert_has_calls([
+ mock.call(mock_get_log.return_value),
+ mock.call(mock_corosync_log.return_value)
+ ])
+ mock_dump.assert_has_calls([
+ mock.call(mock_ctx_inst, mock_get_log.return_value),
+ mock.call(mock_ctx_inst, mock_corosync_log.return_value)
+ ])
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log_not_exist(self, mock_conf, mock_exists, mock_warning):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_exists.return_value = False
+ self.assertEqual(collect.get_corosync_log(), "")
+
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log(self, mock_conf, mock_exists, mock_get_value):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_get_value.return_value = "/var/log/cluster/corosync.log"
+ mock_exists.return_value = True
+ self.assertEqual(collect.get_corosync_log(), mock_get_value.return_value)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_collect_journal_logs(self, mock_ts_to_str, mock_get_cmd_output,
+ mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.JOURNAL_F,
+ constants.JOURNAL_PCMK_F,
+ constants.JOURNAL_COROSYNC_F,
+ constants.JOURNAL_SBD_F
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1234, to_time=5678, work_dir="/opt/work")
+ mock_ts_to_str.side_effect = ["10.10", "10.12"]
+ mock_get_cmd_output.side_effect = ["data_default", "data_pacemaker", "data_corosync", "data_sbd"]
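+ # One command output per journalctl invocation: default, pacemaker, corosync and sbd (see cmd_list below).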
+ collect.collect_journal_logs(mock_ctx_inst)
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+ cmd_list = [
+ 'journalctl -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u pacemaker -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u corosync -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u sbd -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2'
+ ]
+ mock_get_cmd_output.assert_has_calls([
+ mock.call(cmd_list[0]),
+ mock.call(cmd_list[1]),
+ mock.call(cmd_list[2]),
+ mock.call(cmd_list[3]),
+ ])
+ mock_logger.debug2.assert_has_calls([
+ mock.call("Collect journal logs since: 10.10 until: 10.12"),
+ mock.call(f"Running command: {cmd_list[0]}"),
+ mock.call(f"Running command: {cmd_list[1]}"),
+ mock.call(f"Running command: {cmd_list[2]}"),
+ mock.call(f"Running command: {cmd_list[3]}"),
+ ])
+ mock_logger.debug.assert_has_calls([
+ mock.call(f"Dump jounal log for default into {constants.JOURNAL_F}"),
+ mock.call(f"Dump jounal log for pacemaker into {constants.JOURNAL_PCMK_F}"),
+ mock.call(f"Dump jounal log for corosync into {constants.JOURNAL_COROSYNC_F}"),
+ mock.call(f"Dump jounal log for sbd into {constants.JOURNAL_SBD_F}")
+ ])
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process_empty(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, None, None)
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 0\n")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.side_effect = [
+ (0, "1000", None),
+ (0, "data1", None),
+ (0, "data2", None)
+ ]
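+ # First call returns the PID of a D-state process; the next two return its comm and stack, as asserted below.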
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 1\npid: 1000 comm: data1\ndata2\n\n")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("ps aux|awk '$8 ~ /^D/{print $2}'"),
+ mock.call('cat /proc/1000/comm'),
+ mock.call('cat /proc/1000/stack')
+ ])
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_config(self, mock_exists, mock_debug):
+ mock_exists.return_value = False
+ mock_ctx_inst = mock.Mock()
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_debug.assert_called_once_with(f"SBD config file {constants.SBDCONF} does not exist")
+
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_cmd(self, mock_exists, mock_copy, mock_which):
+ mock_exists.return_value = True
+ mock_which.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info(self, mock_exists, mock_copy, mock_which, mock_run, mock_debug, mock_open_file, mock_real_path):
+ mock_real_path.return_value = constants.SBD_F
+ mock_exists.return_value = True
+ mock_which.return_value = True
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_run.return_value = "data"
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+
+ collect.collect_sbd_info(mock_ctx_inst)
+
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+ mock_open_file.assert_called_once_with(f"{mock_ctx_inst.work_dir}/{constants.SBD_F}", "w")
+ file_handle.write.assert_has_calls([
+ mock.call("\n\n#=====[ Command ] ==========================#\n"),
+ mock.call("# . /etc/sysconfig/sbd;export SBD_DEVICE;sbd dump;sbd list\n"),
+ mock.call("data")
+ ])
+ mock_debug.assert_called_once_with(f"Dump SBD config file into {constants.SBD_F}")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_pe_to_dot(self, mock_run, mock_warning):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, None)
+ collect.pe_to_dot("/opt/pe-input-0.bz2")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_simulate -D /opt/pe-input-0.dot -x /opt/pe-input-0.bz2")
+ mock_warning.assert_called_once_with('pe_to_dot: %s -> %s failed', '/opt/pe-input-0.bz2', '/opt/pe-input-0.dot')
+
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs_no_found(self, mock_logger, mock_find_files):
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir")
+ mock_find_files.return_value = []
+ collect.collect_pe_inputs(mock_ctx_inst)
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call("No PE file found for the giving time")
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.pe_to_dot')
+ @mock.patch('os.symlink')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs(self, mock_logger, mock_find_files, mock_mkdir, mock_symlink, mock_to_dot, mock_real_path):
+ mock_real_path.return_value = "pe_dir"
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir", work_dir="/opt/work_dir", speed_up=False)
+ mock_find_files.return_value = ["/opt/pe_dir/pe_input1", "/opt/pe_dir/pe_input2"]
+
+ collect.collect_pe_inputs(mock_ctx_inst)
+
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call(f"Found 2 PE files in {mock_ctx_inst.pe_dir}"),
+ ])
+ mock_logger.debug.assert_called_once_with(f"Dump PE files into pe_dir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ def test_collect_sys_stats(self, mock_run, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.SYSSTATS_F
+ mock_run.side_effect = [
+ "data_hostname", "data_uptime", "data_ps_axf", "data_ps_auxw",
+ "data_top", "data_ip_addr", "data_ip_link", "data_ip_show", "data_iscsi",
+ "data_lspci", "data_mount", "data_cpuinfo", TimeoutExpired("df", 5)
+ ]
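+ # The final "df" command raises TimeoutExpired, triggering the timeout warning asserted below.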
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sys_stats(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with(f"Timeout while running command: df")
+ mock_run.assert_has_calls([
+ mock.call("hostname", timeout=5),
+ mock.call("uptime", timeout=5),
+ mock.call("ps axf", timeout=5),
+ mock.call("ps auxw", timeout=5),
+ mock.call("top -b -n 1", timeout=5),
+ mock.call("ip addr", timeout=5),
+ mock.call("ip -s link", timeout=5),
+ mock.call("ip n show", timeout=5),
+ mock.call("lsscsi", timeout=5),
+ mock.call("lspci", timeout=5),
+ mock.call("mount", timeout=5),
+ mock.call("cat /proc/cpuinfo", timeout=5),
+ mock.call("df", timeout=5)
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_distro_info')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.uname')
+ @mock.patch('crmsh.report.utils.Package')
+ def test_collect_sys_info(self, mock_package, mock_uname, mock_str2file, mock_get_distro, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.SYSINFO_F
+ mock_package_inst = mock.Mock()
+ mock_package.return_value = mock_package_inst
+ mock_package_inst.version = mock.Mock(return_value="version_data\n")
+ mock_package_inst.verify = mock.Mock(return_value="verify_data\n")
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work")
+ mock_uname.return_value = ("Linux", None, "4.5", None, "x86_64")
+ mock_get_distro.return_value = "suse"
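+ # The os.uname() fields (sysname, release, machine) and the distro string feed the sysinfo text asserted below.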
+
+ collect.collect_sys_info(mock_ctx_inst)
+
+ mock_package.assert_called_once_with(constants.PACKAGES)
+ mock_str2file.assert_called_once_with('##### System info #####\nPlatform: Linux\nKernel release: 4.5\nArchitecture: x86_64\nDistribution: suse\n\n##### Installed cluster related packages #####\nversion_data\n\n\n##### Verification output of packages #####\nverify_data\n', '/opt/work/sysinfo.txt')
+ mock_debug.assert_called_once_with(f"Dump packages and platform info into {constants.SYSINFO_F}")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.dump_runtime_state')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_running(self, mock_service, mock_dump_state, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = True
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_stopped(self, mock_service, mock_copy2, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", cib_dir="/var/log/pacemaker/cib")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ @mock.patch('os.path.isfile')
+ def test_consume_cib_in_workdir(self, mock_isfile, mock_run, mock_str2file):
+ mock_isfile.return_value = True
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["data1", "data2"]
+ collect.consume_cib_in_workdir("/workdir")
+ mock_isfile.assert_called_once_with(f"/workdir/{constants.CIB_F}")
+ mock_run_inst.get_stdout_or_raise_error.assert_has_calls([
+ mock.call('CIB_file=/workdir/cib.xml crm configure show'),
+ mock.call('crm_verify -V -x /workdir/cib.xml')
+ ])
+ mock_str2file.assert_has_calls([
+ mock.call("data1", f"/workdir/{constants.CONFIGURE_SHOW_F}"),
+ mock.call("data2", f"/workdir/{constants.CRM_VERIFY_F}")
+ ])
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces_return(self, mock_run, mock_logger):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"])
+ collect.collect_ratraces(mock_ctx_inst)
+ mock_logger.debug2.assert_not_called()
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces(self, mock_run, mock_find, mock_mkdirp, mock_copy, mock_logger, mock_real_path):
+ mock_real_path.return_value = "/var/log"
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "INFO: Trace for .* is written to /var/log/cluster/pacemaker.log"
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, data, None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work")
+ mock_find.return_value = ["/var/log/cluster"]
+
+ collect.collect_ratraces(mock_ctx_inst)
+
+ mock_logger.debug2.assert_called_once_with('Looking for RA trace files in "%s"', '/var/log/cluster')
+ mock_logger.debug.assert_called_once_with(f'Dump RA trace files into {mock_real_path.return_value}')
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_lsof_ocfs2_device(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mount_data = """
+/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
+tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64)
+/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non
+ """
+ mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)]
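+ # First call returns the mount table; the second returns the lsof output for the single ocfs2 device (/dev/sda7).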
+ res = collect.lsof_ocfs2_device()
+ self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("mount"),
+ mock.call("lsof /dev/sda7")
+ ])
+
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('os.path.exists')
+ @mock.patch('shutil.which')
+ def test_ocfs2_commands_output(self, mock_which, mock_exists, mock_run):
+ mock_which.side_effect = [False for i in range(5)] + [True, True]
+ mock_exists.return_value = False
+ mock_run.return_value = "data"
+ res = collect.ocfs2_commands_output()
+ self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mount\ndata")
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_error(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, "error")
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('Failed to run "mounted.ocfs2 -d": error', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_no_found(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('No ocfs2 partitions found', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.ocfs2_commands_output')
+ @mock.patch('crmsh.report.collect.lsof_ocfs2_device')
+ @mock.patch('crmsh.report.collect.dump_D_process')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info(self, mock_run, mock_str2file, mock_debug2, mock_D, mock_lsof, mock_output, mock_real_path):
+ mock_real_path.return_value = constants.OCFS2_F
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "line1\nline2", None)
+ mock_D.return_value = "data_D\n"
+ mock_lsof.return_value = "data_lsof\n"
+ mock_output.return_value = "data_output\n"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('data_D\ndata_lsof\ndata_output\n', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ def test_collect_dlm_info(self, mock_which, mock_get_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.DLM_DUMP_F
+ mock_which.return_value = True
+ ls_data = """
+dlm lockspaces
+name 08BB5A6A38EE491DBF63627EEB57E558
+id 0x19041a12
+ """
+ mock_get_output.side_effect = [ls_data, "lockdebug data", "dump data"]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ collect.collect_dlm_info(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump DLM information into {constants.DLM_DUMP_F}")
+
+ @mock.patch('crmsh.report.collect.dump_core_info')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_coredump_info(self, mock_find, mock_basename, mock_warning, mock_dump):
+ mock_ctx_inst = mock.Mock(cores_dir_list=['/var/lib/pacemaker/cores'], work_dir="/opt/work_dir")
+ mock_find.return_value = ["/var/lib/pacemaker/cores/core.1"]
+ mock_basename.return_value = "core.1"
+ collect.collect_coredump_info(mock_ctx_inst)
+ mock_dump.assert_called_once_with("/opt/work_dir", mock_find.return_value)
+ mock_warning.assert_called_once_with(f"Found coredump file: {mock_find.return_value}")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core_not_found(self, mock_run):
+ mock_run().get_stdout_stderr.return_value = (0, "Core not found", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Cannot find the program path for core core.1", res)
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "Core was generated by `/usr/sbin/crm_mon'", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Core core.1 was generated by /usr/sbin/crm_mon", res)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.which')
+ def test_dump_core_info_no_gdb(self, mock_which, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = False
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_logger.warning.assert_called_once_with("Please install gdb to get more info for coredump files")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.find_binary_path_for_core')
+ @mock.patch('shutil.which')
+ def test_dump_core_info(self, mock_which, mock_find_binary, mock_str2file, mock_debug2, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = True
+ mock_find_binary.return_value = "data"
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_str2file.assert_called_once_with("data\n\nPlease utilize the gdb and debuginfo packages to obtain more detailed information locally", f"/opt/workdir/{constants.COREDUMP_F}")
+ mock_debug2(f"Dump coredump info into {constants.COREDUMP_F}")
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('pwd.getpwnam')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_collect_perms_state(self, mock_isdir, mock_stat, mock_getpwnam, mock_str2file):
+ mock_ctx_inst = mock.Mock(
+ pcmk_lib_dir="/var/lib/pacemaker",
+ pe_dir="/var/lib/pacemaker/pe",
+ cib_dir="/var/lib/pacemaker/cib",
+ work_dir="/opt/work_dir"
+ )
+ mock_isdir.side_effect = [False, True, True]
+ mock_stat_inst_pe = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat_inst_cib = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat.side_effect = [mock_stat_inst_pe, mock_stat_inst_cib]
+ mock_getpwnam_inst_pe = mock.Mock(pw_uid=1000, pw_gid=1000)
+ mock_getpwnam_inst_cib = mock.Mock(pw_uid=1001, pw_gid=1000)
+ mock_getpwnam.side_effect = [mock_getpwnam_inst_pe, mock_getpwnam_inst_cib]
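+ # Expected outcomes per directory: pcmk_lib_dir missing, pe_dir owned correctly, cib_dir with a mismatched uid (1001 != 1000).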
+
+ collect.collect_perms_state(mock_ctx_inst)
+
+ data = "##### Check perms for /var/lib/pacemaker: /var/lib/pacemaker is not a directory or does not exist\n##### Check perms for /var/lib/pacemaker/pe: OK\n##### Check perms for /var/lib/pacemaker/cib: Permissions or ownership for /var/lib/pacemaker/cib are incorrect\n"
+ mock_str2file.assert_called_once_with(data, f"/opt/work_dir/{constants.PERMISSIONS_F}")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.get_dc')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_dump_runtime_state(self, mock_run, mock_str2file, mock_debug, mock_get_dc, mock_this_node, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.CRM_MON_F,
+ constants.CIB_F,
+ constants.MEMBERSHIP_F,
+ "workdir"
+ ]
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["crm_mon_data", "cib_data", "crm_node_data"]
+ mock_get_dc.return_value = "node1"
+ mock_this_node.return_value = "node1"
+ collect.dump_runtime_state("/opt/workdir")
+ mock_debug.assert_has_calls([
+ mock.call(f"Dump cluster state into {constants.CRM_MON_F}"),
+ mock.call(f"Dump CIB contents into {constants.CIB_F}"),
+ mock.call(f"Dump members of this partition into {constants.MEMBERSHIP_F}"),
+ mock.call(f"Current DC is node1; Touch file 'DC' in workdir")
+ ])
+
+ @mock.patch('shutil.copytree')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.corosync.conf')
+ def test_dump_configurations(self, mock_corosync_conf, mock_isfile, mock_copy2, mock_isdir, mock_basename, mock_copytree):
+ mock_corosync_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_isfile.side_effect = [True, True, False, True]
+ mock_isdir.return_value = True
+ mock_basename.return_value = "drbd.d"
+ collect.dump_configurations("/opt/workdir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_corosync_blackbox(self, mock_find_files, mock_get_cmd_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.COROSYNC_RECORDER_F
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ mock_find_files.return_value = ["/var/lib/corosync/fdata.1"]
+ mock_get_cmd_output.return_value = "data"
+ collect.collect_corosync_blackbox(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump corosync blackbox info into {constants.COROSYNC_RECORDER_F}")
diff --git a/test/unittests/test_report_core.py b/test/unittests/test_report_core.py
new file mode 100644
index 0000000..dd6e842
--- /dev/null
+++ b/test/unittests/test_report_core.py
@@ -0,0 +1,551 @@
+from crmsh import config
+from crmsh.report import core, constants, utils, collect
+import crmsh.log
+
+import sys
+import argparse
+import unittest
+from unittest import mock
+
+
+class TestCapitalizedHelpFormatter(unittest.TestCase):
+ def setUp(self):
+ # Initialize the ArgumentParser with the CapitalizedHelpFormatter
+ self.parser = argparse.ArgumentParser(
+ formatter_class=core.CapitalizedHelpFormatter,
+ usage="usage: test"
+ )
+ self.parser.add_argument('--test', help='Test option')
+
+ def test_usage(self):
+ # Test that the usage is capitalized
+ usage_text = self.parser.format_usage()
+ self.assertTrue(usage_text.startswith('Usage: '))
+
+ def test_section_heading(self):
+ # Test that section headings are capitalized
+ section_text = self.parser.format_help()
+ self.assertTrue('Option' in section_text)
+
+
+class TestContext(unittest.TestCase):
+
+ @mock.patch('crmsh.report.utils.parse_to_timestamp')
+ @mock.patch('crmsh.report.utils.now')
+ @mock.patch('crmsh.report.core.config')
+ def setUp(self, mock_config, mock_now, mock_parse_to_timestamp):
+ mock_config.report = mock.Mock(
+ from_time="20230101",
+ compress=False,
+ collect_extra_logs="file1 file2",
+ remove_exist_dest=False,
+ single_node=False
+ )
+ mock_now.return_value = "12345"
+ mock_parse_to_timestamp.return_value = "54321"
+ self.context = core.Context()
+ self.context.load()
+
+ def test_attribute_setting(self):
+ self.context.name = "value"
+ self.assertEqual(self.context.name, "value")
+ self.context["age"] = 19
+ self.assertEqual(self.context.age, 19)
+ self.context.extra_log_list = ["file3", "file2"]
+ self.assertEqual(len(self.context.extra_log_list), 3)
+
+ @mock.patch('json.dumps')
+ def test_str(self, mock_dumps):
+ mock_dumps.return_value = "json str"
+ self.assertEqual(self.context.name, "crm_report")
+ self.assertEqual(self.context.from_time, "54321")
+ self.assertEqual(str(self.context), "json str")
+
+
+class TestRun(unittest.TestCase):
+
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dest_not_exist(self, mock_isdir):
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Directory /opt/test does not exist", str(err.exception))
+
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_filename_not_sane(self, mock_isdir, mock_basename, mock_sane):
+ mock_isdir.return_value = True
+ mock_sane.return_value = False
+ mock_basename.return_value = "report*"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report*")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("report* is invalid file name", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('shutil.rmtree')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists_rmtree(self, mock_isdir, mock_basename, mock_sane, mock_rmtree, mock_pick):
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=True)
+ core.process_dest(mock_ctx_inst)
+ mock_rmtree.assert_called_once_with("/opt/test/report")
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists(self, mock_isdir, mock_basename, mock_sane, mock_pick):
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=False)
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Destination directory /opt/test/report exists, please cleanup or use -Z option", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.utils.now')
+ def test_process_dest(self, mock_now, mock_isdir, mock_basename, mock_is_sane, mock_pick):
+ mock_now.return_value = "Mon-28-Aug-2023"
+ mock_isdir.side_effect = [True, False]
+ mock_is_sane.return_value = True
+ mock_basename.return_value = f"report.{mock_now.return_value}"
+ mock_ctx_inst = mock.Mock(dest=None, no_compress=False, compress_suffix=".bz2", name="report")
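+ # With dest unset, the report name falls back to ctx.name plus the mocked timestamp; compression adds the .tar.bz2 suffix.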
+
+ core.process_dest(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.dest_dir, ".")
+ mock_is_sane.assert_called_once_with(mock_basename.return_value)
+ self.assertEqual(mock_ctx_inst.dest_path, "./report.Mon-28-Aug-2023.tar.bz2")
+
+ @mock.patch('crmsh.report.core.pick_first_compress')
+ def test_pick_compress_prog(self, mock_pick):
+ mock_pick.return_value = (None, None)
+ mock_ctx_inst = mock.Mock()
+ core.pick_compress_prog(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.compress_prog, "cat")
+
+ @mock.patch('shutil.which')
+ def test_pick_first_compress_return(self, mock_which):
+ mock_which.return_value = True
+ prog, ext = core.pick_first_compress()
+ self.assertEqual(prog, "gzip")
+ self.assertEqual(ext, ".gz")
+ mock_which.assert_called_once_with("gzip")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_pick_first_compress(self, mock_which, mock_warn):
+ mock_which.side_effect = [False, False, False]
+ prog, ext = core.pick_first_compress()
+ self.assertIsNone(prog)
+ self.assertIsNone(ext)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('logging.Logger.info')
+ def test_finalword(self, mock_info, mock_get_timespan):
+ mock_ctx_inst = mock.Mock(dest_path="./crm_report-Tue-15-Aug-2023.tar.bz2", node_list=["node1", "node2"])
+ mock_get_timespan.return_value = "2023-08-14 18:17 - 2023-08-15 06:17"
+ core.finalword(mock_ctx_inst)
+ mock_info.assert_has_calls([
+ mock.call(f"The report is saved in {mock_ctx_inst.dest_path}"),
+ mock.call(f"Report timespan: {mock_get_timespan.return_value}"),
+ mock.call(f"Including nodes: {' '.join(mock_ctx_inst.node_list)}"),
+ mock.call("Thank you for taking time to create this report")
+ ])
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir_collector(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir", me="node1")
+ mock_collector.return_value = True
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir")
+ mock_collector.return_value = False
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.load_from_crmsh_config')
+ def test_load_context_attributes(self, mock_load, mock_isdir):
+ mock_ctx_inst = mock.Mock(cib_dir="/var/lib/pacemaker/cib")
+ mock_isdir.return_value = True
+
+ core.load_context_attributes(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.pcmk_lib_dir, "/var/lib/pacemaker")
+ self.assertEqual(mock_ctx_inst.cores_dir_list, ["/var/lib/pacemaker/cores", constants.COROSYNC_LIB])
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ crm_daemon_dir="/usr/lib/pacemaker",
+ pe_state_dir="/var/lib/pacemaker/pe"
+ )
+ mock_isdir.side_effect = [True, True, True]
+ mock_ctx_inst = mock.Mock()
+
+ core.load_from_crmsh_config(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.cib_dir, mock_config.path.crm_config)
+ self.assertEqual(mock_ctx_inst.pcmk_exec_dir, mock_config.path.crm_daemon_dir)
+ self.assertEqual(mock_ctx_inst.pe_dir, mock_config.path.pe_state_dir)
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config_exception(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ )
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock()
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.load_from_crmsh_config(mock_ctx_inst)
+ self.assertEqual(f"Cannot find CIB directory", str(err.exception))
+
+ def test_adjust_verbosity_debug(self):
+ mock_ctx_inst = mock.Mock(debug=1)
+ core.adjust_verbosity(mock_ctx_inst)
+
+ def test_adjust_verbosity(self):
+ mock_ctx_inst = mock.Mock(debug=0)
+ config.core.debug = True
+ core.adjust_verbosity(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('json.loads')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_load_context(self, mock_logger, mock_json_loads, mock_config, mock_verbosity):
+ class Context:
+ def __str__(self):
+ return "data"
+ def __setitem__(self, key, value):
+ self.__dict__[key] = value
+
+ sys.argv = ["arg1", "arg2", "arg3"]
+ mock_config.report = mock.Mock(verbosity=None)
+ mock_json_loads.return_value = {"key": "value", "debug": "true"}
+ mock_ctx_inst = Context()
+ core.load_context(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with("Loading context from collector: data")
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.process_arguments')
+ @mock.patch('crmsh.utils.check_empty_option_value')
+ @mock.patch('crmsh.report.core.add_arguments')
+ def test_parse_arguments(self, mock_parse, mock_check_space, mock_process, mock_verbosity):
+ mock_args = mock.Mock(option1="value1")
+ mock_parse.return_value = mock_args
+ mock_ctx_inst = mock.Mock()
+
+ core.parse_arguments(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.option1, "value1")
+
+ mock_check_space.assert_called_once_with(mock_args)
+ mock_process.assert_called_once_with(mock_ctx_inst)
+
+ def test_is_collector(self):
+ sys.argv = ["report", "__collector"]
+ self.assertEqual(core.is_collector(), True)
+
+ @mock.patch('crmsh.report.core.push_data')
+ @mock.patch('crmsh.report.core.collect_logs_and_info')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl_collector(self, mock_context, mock_collector, mock_load, mock_setup, mock_collect_info, mock_push):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
+ mock_collector.side_effect = [True, True]
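+ # is_collector() is consulted twice; returning True both times selects the collector path asserted below.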
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_collect_info.assert_called_once_with(mock_ctx_inst)
+ mock_push.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.process_results')
+ @mock.patch('crmsh.report.core.collect_for_nodes')
+ @mock.patch('crmsh.report.core.find_ssh_user')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context_attributes')
+ @mock.patch('crmsh.report.core.parse_arguments')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl(self, mock_context, mock_collector, mock_parse, mock_load, mock_setup, mock_find_ssh, mock_collect, mock_process_results):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
+ mock_collector.side_effect = [False, False]
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_parse.assert_called_once_with(mock_ctx_inst)
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_find_ssh.assert_called_once_with(mock_ctx_inst)
+ mock_collect.assert_called_once_with(mock_ctx_inst)
+ mock_process_results.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception_generic(self, mock_run, mock_log_error):
+ mock_run.side_effect = utils.ReportGenericError("error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_log_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception(self, mock_run, mock_print):
+ mock_run.side_effect = UnicodeDecodeError("encoding", b'', 0, 1, "error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_print.assert_called_once_with()
+
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments_help(self, mock_argparse, mock_formatter):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+
+ with self.assertRaises(SystemExit):
+ core.add_arguments()
+
+ mock_argparse_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments(self, mock_argparse, mock_formatter, mock_config):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=False, debug=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+ mock_config.report = mock.Mock(verbosity=False)
+
+ core.add_arguments()
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.to_ascii')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_push_data(self, mock_sh_utils, mock_to_ascii, mock_logger):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, "data", "error")
+ mock_to_ascii.return_value = "error"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", main_node="node1", me="node1")
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.push_data(mock_ctx_inst)
+ self.assertEqual("error", str(err.exception))
+
+ mock_logger.debug2.assert_called_once_with("Pushing data from node1:/opt/work_dir to node1")
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with("cd /opt/work_dir/.. && tar -h -c node1", raw=True)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('shutil.move')
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ def test_process_results_no_compress(self, mock_analyze, mock_create, mock_move, mock_final):
+ mock_ctx_inst = mock.Mock(speed_up=True, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=True)
+ core.process_results(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+ mock_move.assert_called_once_with(mock_ctx_inst.work_dir, mock_ctx_inst.dest_dir)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('crmsh.report.core.sh.cluster_shell')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ @mock.patch('crmsh.report.utils.do_sanitize')
+ def test_process_results(self, mock_sanitize, mock_analyze, mock_create, mock_debug2, mock_run, mock_final):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error = mock.Mock()
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=False, dest="report", compress_prog="tar", compress_suffix=".bz2")
+ core.process_results(mock_ctx_inst)
+ mock_sanitize.assert_called_once_with(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.getmembers')
+ @mock.patch('multiprocessing.cpu_count')
+ @mock.patch('multiprocessing.Pool')
+ def test_collect_logs_and_info(self, mock_pool, mock_cpu_count, mock_getmember, mock_print):
+ mock_cpu_count.return_value = 4
+ mock_pool_inst = mock.Mock()
+ mock_pool.return_value = mock_pool_inst
+ mock_pool_inst.apply_async = mock.Mock()
+ mock_async_inst1 = mock.Mock()
+ mock_async_inst2 = mock.Mock()
+ mock_pool_inst.apply_async.side_effect = [mock_async_inst1, mock_async_inst2]
+ mock_async_inst1.get = mock.Mock()
+ mock_async_inst2.get = mock.Mock(side_effect=ValueError)
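+ # The second async result raises ValueError to exercise the failure path; the pool size (cpu_count - 1 = 3) is asserted below.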
+ mock_pool_inst.close = mock.Mock()
+ mock_pool_inst.join = mock.Mock()
+ mock_getmember.return_value = [("collect_func1", None), ("collect_func2", None)]
+ collect.collect_func1 = mock.Mock()
+ collect.collect_func2 = mock.Mock()
+ mock_ctx_inst = mock.Mock()
+
+ core.collect_logs_and_info(mock_ctx_inst)
+ mock_pool.assert_called_once_with(3)
+
+ @mock.patch('multiprocessing.Process')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.report.core.start_collector')
+ def test_collect_for_nodes(self, mock_start_collector, mock_info, mock_process):
+ mock_ctx_inst = mock.Mock(
+ node_list=["node1", "node2"],
+ ssh_askpw_node_list=["node2"],
+ ssh_user=""
+ )
+ mock_process_inst = mock.Mock()
+ mock_process.return_value = mock_process_inst
+ core.collect_for_nodes(mock_ctx_inst)
+
+ def test_process_arguments_value_error(self):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=100)
+ with self.assertRaises(ValueError) as err:
+ core.process_arguments(mock_ctx_inst)
+ self.assertEqual("The start time must be before the finish time", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_exception(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=[])
+ mock_list_nodes.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual("Could not figure out a list of nodes; is this a cluster node?", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_single(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=True, me="node1")
+ core.process_node_list(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list(self, mock_list_nodes, mock_ping, mock_error):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=False, me="node1")
+ mock_ping.side_effect = ValueError("error")
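+ # Pinging node2 fails, so only the local node is expected to remain in node_list (asserted below).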
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.node_list, ["node1"])
+
+ @mock.patch('crmsh.report.core.process_node_list')
+ @mock.patch('crmsh.report.core.process_dest')
+ def test_process_arguments(self, mock_dest, mock_node_list):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=150)
+ core.process_arguments(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user_not_found(self, mock_get_sudoer, mock_getuser, mock_check_ssh, mock_logger):
+ mock_get_sudoer.return_value = ""
+ mock_getuser.return_value = "user2"
+ mock_check_ssh.return_value = True
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"], me="node1")
+ core.find_ssh_user(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with(f"passwordless ssh to node(s) ['node2'] does not work")
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user(self, mock_get_sudoer, mock_getuser, mock_this_node, mock_check_ssh, mock_debug, mock_warn, mock_debug2):
+ mock_get_sudoer.return_value = "user1"
+ mock_getuser.return_value = "user2"
+ mock_this_node.return_value = "node1"
+ mock_check_ssh.return_value = False
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"])
+ core.find_ssh_user(mock_ctx_inst)
+ self.assertEqual("sudo", mock_ctx_inst.sudo)
+ self.assertEqual("user1", mock_ctx_inst.ssh_user)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_start_collector_return(self, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, '', None)
+ mock_ctx_inst = mock.Mock(me="node1")
+ core.start_collector("node1", mock_ctx_inst)
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with(f"{constants.BIN_COLLECTOR} '{mock_ctx_inst}'")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector_warn(self, mock_this_node, mock_sh, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (1, '', "error")
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ core.start_collector("node1", mock_ctx_inst)
+ mock_warn.assert_called_once_with("error")
+
+ @mock.patch('ast.literal_eval')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector(self, mock_this_node, mock_sh_utils, mock_sh, mock_eval):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (0, f"line1\n{constants.COMPRESS_DATA_FLAG}data", None)
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ mock_eval.return_value = "data"
+ core.start_collector("node1", mock_ctx_inst)
diff --git a/test/unittests/test_report_utils.py b/test/unittests/test_report_utils.py
new file mode 100644
index 0000000..aa28563
--- /dev/null
+++ b/test/unittests/test_report_utils.py
@@ -0,0 +1,862 @@
+import sys
+import datetime
+from crmsh import config
+from crmsh import utils as crmutils
+from crmsh.report import utils, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestPackage(unittest.TestCase):
+
+ @mock.patch('crmsh.report.utils.get_pkg_mgr')
+ def setUp(self, mock_get_pkg_mgr):
+ mock_get_pkg_mgr.side_effect = [None, "rpm", "deb"]
+ self.inst_none = utils.Package("xxx1 xxx2")
+ self.inst = utils.Package("rpm1 rpm2")
+ self.inst_deb = utils.Package("deb1 deb2")
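+ # Three Package instances: no package manager detected (empty results), rpm-based, and deb-based.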
+
+ def test_version_return(self):
+ res = self.inst_none.version()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.pkg_ver_rpm')
+ def test_version(self, mock_ver_rpm):
+ mock_ver_rpm.return_value = "version1"
+ res = self.inst.version()
+ self.assertEqual(res, "version1")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "rpm1-4.5.0\nrpm2 not installed"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst.pkg_ver_rpm()
+ self.assertEqual(res, "rpm1-4.5.0")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "deb1-4.5.0\nno packages found"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst_deb.pkg_ver_deb()
+ self.assertEqual(res, "deb1-4.5.0")
+
+ def test_verify_return(self):
+ res = self.inst_none.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.verify_rpm')
+ def test_verify(self, mock_verify_rpm):
+ mock_verify_rpm.return_value = ""
+ res = self.inst.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed","")
+ res = self.inst.verify_rpm()
+ self.assertEqual(res, "verify data")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed","")
+ res = self.inst_deb.verify_deb()
+ self.assertEqual(res, "verify data")
+
+
+class TestSanitizer(unittest.TestCase):
+
+ def setUp(self):
+ mock_ctx_inst_no_sanitize = mock.Mock(sanitize=False)
+ self.s_inst_no_sanitize = utils.Sanitizer(mock_ctx_inst_no_sanitize)
+
+ mock_ctx_inst_no_sanitize_set = mock.Mock(sensitive_regex_list=[])
+ self.s_inst_no_sanitize_set = utils.Sanitizer(mock_ctx_inst_no_sanitize_set)
+
+ mock_ctx_inst = mock.Mock(sanitize=True, work_dir="/opt", sensitive_regex_list=["test_patt"])
+ self.s_inst = utils.Sanitizer(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_return(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_warning):
+ mock_include.return_value = True
+ self.s_inst_no_sanitize.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_warning.assert_has_calls([
+ mock.call("Some PE/CIB/log files contain possibly sensitive data"),
+ mock.call("Using \"-s\" option can replace sensitive data")
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer._get_file_list_in_work_dir')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_get_file):
+ mock_include.return_value = True
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_get_file.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_no_sensitive_data(self, mock_load_cib, mock_parse, mock_extract, mock_include):
+ mock_include.return_value = False
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+
+ def test_include_sensitive_data(self):
+ res = self.s_inst._include_sensitive_data()
+ self.assertEqual(res, [])
+
+ @mock.patch('os.walk')
+ def test_get_file_list_in_work_dir(self, mock_walk):
+ mock_walk.return_value = [
+ ("/opt", [], ["file1", "file2"]),
+ ("/opt/dir1", [], ["file3"]),
+ ]
+ self.s_inst._get_file_list_in_work_dir()
+ self.assertEqual(self.s_inst.file_list_in_workdir, ['/opt/file1', '/opt/file2', '/opt/dir1/file3'])
+
+ @mock.patch('glob.glob')
+ def test_load_cib_from_work_dir_no_cib(self, mock_glob):
+ mock_glob.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"CIB file {constants.CIB_F} was not collected", str(err.exception))
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir_empty(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"File /opt/node1/{constants.CIB_F} is empty", str(err.exception))
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = "data"
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(self.s_inst.cib_data, "data")
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set_no_set(self, mock_logger):
+ config.report.sanitize_rule = ""
+ self.s_inst_no_sanitize_set._parse_sensitive_set()
+ self.assertEqual(self.s_inst_no_sanitize_set.sensitive_regex_set, set(utils.Sanitizer.DEFAULT_RULE_LIST))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(utils.Sanitizer.DEFAULT_RULE_LIST)}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set(self, mock_logger):
+ config.report.sanitize_rule = "passw.*"
+ self.s_inst._parse_sensitive_set()
+ self.assertEqual(self.s_inst.sensitive_regex_set, set(['test_patt', 'passw.*']))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(['test_patt', 'passw.*'])}")
+
+ def test_sanitize_return(self):
+ self.s_inst_no_sanitize.sanitize()
+
+ @mock.patch('crmsh.report.utils.write_to_file')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.Sanitizer._sub_sensitive_string')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_sanitize(self, mock_read, mock_sub, mock_debug, mock_write):
+ self.s_inst.file_list_in_workdir = ["file1", "file2"]
+ mock_read.side_effect = [None, "data"]
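+ # read_from_file returns None for "file1", so it is skipped; only "file2" is sanitized and rewritten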
+ mock_sub.return_value = "replace_data"
+ self.s_inst.sanitize()
+ mock_debug.assert_called_once_with("Replace sensitive info for %s", "file2")
+
+ def test_extract_from_cib(self):
+ self.s_inst.cib_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ """
+ res = self.s_inst._extract_from_cib("passw.*")
+ self.assertEqual(res, ["qwertyui"])
+
+ def test_sub_sensitive_string(self):
+ data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="13356789876"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ This is my tel 13356789876
+ """
+ self.s_inst.sensitive_value_list_with_raw_option = ["13356789876"]
+ self.s_inst.sensitive_key_list = ["passw.*"]
+ self.s_inst.sensitive_value_list = ["qwertyui"]
+ res = self.s_inst._sub_sensitive_string(data)
+ expected_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="******"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="******"/>
+ </utilization>
+ This is my tel ******
+ """
+ self.assertEqual(res, expected_data)
+
+ @mock.patch('logging.Logger.warning')
+ def test_extract_sensitive_value_list_warn(self, mock_warn):
+ self.s_inst.sensitive_regex_set = set(["TEL:test"])
+ self.s_inst._extract_sensitive_value_list()
+ mock_warn.assert_called_once_with("For sanitize pattern TEL:test, option should be \"raw\"")
+
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_from_cib')
+ def test_extract_sensitive_value_list(self, mock_extract):
+ mock_extract.side_effect = [["123456"], ["qwertyui"]]
+ self.s_inst.sensitive_regex_set = set(["TEL:raw", "passw.*"])
+ self.s_inst._extract_sensitive_value_list()
+
+class TestUtils(unittest.TestCase):
+
+ @mock.patch('builtins.sorted', side_effect=lambda x, *args, **kwargs: x[::-1])
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.report.utils.is_our_log')
+ def test_arch_logs(self, mock_is_our_log, mock_glob, mock_logger, mock_timespan, mock_sorted):
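+ # builtins.sorted is patched to simply reverse its input, keeping the ordering inside arch_logs deterministic for this assertion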
+ mock_is_our_log.return_value = utils.LogType.GOOD
+ mock_glob.return_value = []
+ mock_ctx_inst = mock.Mock()
+ mock_timespan.return_value = "0101-0202"
+
+ return_list, log_type = utils.arch_logs(mock_ctx_inst, "file1")
+
+ self.assertEqual(return_list, ["file1"])
+ self.assertEqual(log_type, utils.LogType.GOOD)
+ mock_logger.debug2.assert_called_once_with("Found logs ['file1'] in 0101-0202")
+
+ @mock.patch('sys.stdout.flush')
+ @mock.patch('traceback.print_exc')
+ def test_print_traceback(self, mock_trace, mock_flush):
+ utils.print_traceback()
+ mock_trace.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_get_timespan_str(self, mock_ts_to_str):
+ mock_ctx_inst = mock.Mock(from_time=1691938980.0, to_time=1691982180.0)
+ mock_ts_to_str.side_effect = ["2023-08-13 23:03", "2023-08-14 11:03"]
+ res = utils.get_timespan_str(mock_ctx_inst)
+ self.assertEqual(res, "2023-08-13 23:03 - 2023-08-14 11:03")
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_get_cmd_output(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "stdout_data", "stderr_data")
+ res = utils.get_cmd_output("cmd")
+ self.assertEqual(res, "stdout_data\nstderr_data\n")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("cmd", timeout=None)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_empty(self, mock_read):
+ mock_read.return_value = None
+ mock_ctx_inst = mock.Mock()
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.EMPTY)
+ mock_read.assert_called_once_with("/opt/logfile")
+
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_irregular(self, mock_read, mock_log_format):
+ mock_read.return_value = "This is the log"
+ mock_ctx_inst = mock.Mock()
+ mock_log_format.return_value = None
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.IRREGULAR)
+ mock_read.assert_called_once_with("/opt/logfile")
+ mock_log_format.assert_called_once_with(mock_read.return_value)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_before(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1600, to_time=1800)
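+ # the log spans [1000, 1500], which ends before the requested [1600, 1800] window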
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.BEFORE_TIMESPAN)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_good(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1200, to_time=1800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.GOOD)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_after(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=200, to_time=800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.AFTER_TIMESPAN)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr_unknown(self, mock_which, mock_warning):
+ mock_which.side_effect = [False, False]
+ self.assertEqual(utils.get_pkg_mgr(), "")
+ mock_warning.assert_called_once_with("Unknown package manager!")
+
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr(self, mock_which):
+ mock_which.return_value = True
+ self.assertEqual(utils.get_pkg_mgr(), "rpm")
+
+ @mock.patch('os.walk')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_find_files_in_timespan(self, mock_isdir, mock_stat, mock_walk):
+ mock_isdir.side_effect = [True, False]
+ mock_stat.return_value = mock.Mock(st_ctime=1615)
+ mock_walk.return_value = [
+ ('/mock_dir', [], ['file1.txt', 'file2.txt'])
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1611, to_time=1620)
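+ # st_ctime 1615 falls inside the [1611, 1620] window, so both files under /mock_dir match; /not_exist is skipped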
+
+ res = utils.find_files_in_timespan(mock_ctx_inst, ['/mock_dir', '/not_exist'])
+
+ expected_result = ['/mock_dir/file1.txt', '/mock_dir/file2.txt']
+ self.assertEqual(res, expected_result)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_return(self, mock_arch, mock_debug, mock_timespan):
+ mock_arch.return_value = [[], ""]
+ mock_ctx_inst = mock.Mock()
+ utils.dump_logset(mock_ctx_inst, "file")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_irregular(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.IRREGULAR]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+ utils.dump_logset(mock_ctx_inst, "file1")
+ mock_print.assert_called_once_with("file1", 0, 0)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_one(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_called_once_with("file1", 10, 20)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1", "file2", "file3"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.side_effect = ["data1\n", "data2\n", "data3\n"]
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_has_calls([
+ mock.call("file3", 10, 0),
+ mock.call("file2", 0, 0),
+ mock.call("file1", 0, 20),
+ ])
+ mock_str2file.assert_called_once_with("data1\ndata2\ndata3", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info(self, mock_debug2, mock_exists, mock_read):
+ mock_exists.return_value = True
+ mock_read.return_value = """
+VERSION_ID="20230629"
+PRETTY_NAME="openSUSE Tumbleweed"
+ANSI_COLOR="0;32"
+ """
+ res = utils.get_distro_info()
+ self.assertEqual(res, "openSUSE Tumbleweed")
+
+ @mock.patch('shutil.which')
+ @mock.patch('crmsh.report.utils.sh.LocalShell')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info_lsb(self, mock_debug2, mock_exists, mock_sh, mock_which):
+ mock_which.return_value = True
+ mock_exists.return_value = False
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_stdout_or_raise_error.return_value = "data"
+ res = utils.get_distro_info()
+ self.assertEqual(res, "Unknown")
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp_none(self, mock_get_timestamp):
+ mock_get_timestamp.side_effect = [None, None]
+ data = ["line1", "line2"]
+ self.assertIsNone(utils.find_first_timestamp(data, "file1"))
+ mock_get_timestamp.assert_has_calls([
+ mock.call("line1", "file1"),
+ mock.call("line2", "file1")
+ ])
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp(self, mock_get_timestamp):
+ mock_get_timestamp.return_value = 123456
+ data = ["line1", "line2"]
+ res = utils.find_first_timestamp(data, "file1")
+ self.assertEqual(res, 123456)
+ mock_get_timestamp.assert_called_once_with("line1", "file1")
+
+ def test_filter_lines(self):
+ data = """line1
+line2
+line3
+line4
+line5
+ """
+ res = utils.filter_lines(data, 2, 4)
+ self.assertEqual(res, 'line2\nline3\nline4\n')
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_none(self, mock_head, mock_parse):
+ mock_head.return_value = ["line1", "line2"]
+ mock_parse.side_effect = [None, None]
+ data = """line1
+line2
+ """
+ self.assertIsNone(utils.determin_log_format(data))
+
+ def test_determin_log_format_rfc5424(self):
+ data = """
+2003-10-11T22:14:15.003Z mymachine.example.com su
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "rfc5424")
+
+ def test_determin_log_format_syslog(self):
+ data = """
+Feb 12 18:30:08 15sp1-1 kernel:
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "syslog")
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_legacy(self, mock_head, mock_parse):
+ mock_head.return_value = ["Legacy 2003-10-11T22:14:15.003Z log"]
+ mock_parse.side_effect = [None, None, 123456]
+ data = """
+Legacy 2003-10-11T22:14:15.003Z log data
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "legacy")
+ mock_parse.assert_has_calls([
+ mock.call("Legacy 2003-10-11T22:14:15.003Z log", quiet=True),
+ mock.call("Legacy", quiet=True),
+ mock.call("2003-10-11T22:14:15.003Z", quiet=True)
+ ])
+
+ def test_get_timestamp_none(self):
+ self.assertIsNone(utils.get_timestamp("", "file1"))
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timespan_rfc5424(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "rfc5424"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("2003-10-11T22:14:15.003Z mymachine.example.com su", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "rfc5424", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timespan_syslog(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "syslog"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("Feb 12 18:30:08 15sp1-1 kernel:", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("Feb 12 18:30:08", "syslog", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timespan_legacy(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "legacy"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("legacy 2003-10-11T22:14:15.003Z log data", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "legacy", "file1")
+
+ @mock.patch('crmsh.report.utils.diff_check')
+ def test_do_compare(self, mock_diff):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", node_list=["node1", "node2"])
+ mock_diff.side_effect = [[0, ""], [0, ""]]
+ rc, out = utils.do_compare(mock_ctx_inst, "file1")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_diff.assert_called_once_with("/opt/workdir/node1/file1", "/opt/workdir/node2/file1")
+
+ @mock.patch('os.path.isfile')
+ def test_diff_check_return(self, mock_isfile):
+ mock_isfile.return_value = False
+ rc, out = utils.diff_check("/opt/file1", "/opt/file2")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "/opt/file1 does not exist\n")
+
+ @mock.patch('crmsh.report.utils.cib_diff')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isfile')
+ def test_diff_check(self, mock_isfile, mock_basename, mock_cib_diff):
+ mock_isfile.side_effect = [True, True]
+ mock_basename.return_value = "cib.xml"
+ mock_cib_diff.return_value = (0, "")
+ rc, out = utils.diff_check("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_txt_diff(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.txt_diff("txt1", "txt2")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('os.path.isfile')
+ def test_cib_diff_not_running(self, mock_isfile):
+ mock_isfile.side_effect = [True, False, False, True]
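+ # the four isfile probes presumably cover the per-node RUNNING/STOPPED markers; one live and one stopped CIB cannot be compared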
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "Can't compare cibs from running and stopped systems\n")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ @mock.patch('os.path.isfile')
+ def test_cib_diff(self, mock_isfile, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_isfile.side_effect = [True, True]
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_diff -c -n /opt/node1/cib.xml -o /opt/node2/cib.xml")
+
+ @mock.patch('os.symlink')
+ @mock.patch('shutil.move')
+ @mock.patch('os.remove')
+ @mock.patch('os.path.isfile')
+ def test_consolidate(self, mock_isfile, mock_remove, mock_move, mock_symlink):
+ mock_isfile.side_effect = [True, False]
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], work_dir="/opt/workdir")
+ utils.consolidate(mock_ctx_inst, "target_file")
+ mock_isfile.assert_has_calls([
+ mock.call("/opt/workdir/target_file"),
+ mock.call("/opt/workdir/target_file")
+ ])
+ mock_symlink.assert_has_calls([
+ mock.call('../target_file', '/opt/workdir/node1/target_file'),
+ mock.call('../target_file', '/opt/workdir/node2/target_file')
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer')
+ def test_do_sanitize(self, mock_sanitizer):
+ mock_inst = mock.Mock()
+ mock_sanitizer.return_value = mock_inst
+ mock_ctx_inst = mock.Mock()
+ utils.do_sanitize(mock_ctx_inst)
+ mock_inst.prepare.assert_called_once_with()
+ mock_inst.sanitize.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_empty(self, mock_read):
+ mock_read.return_value = ""
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_none(self, mock_read, mock_findln):
+ mock_read.return_value = "data"
+ mock_findln.return_value = None
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.filter_lines')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg(self, mock_read, mock_findln, mock_logger, mock_filter):
+ mock_read.return_value = "line1\nline2\nline3"
+ mock_filter.return_value = "line1\nline2\nline3"
+ res = utils.print_logseg("log1", 0, 0)
+ self.assertEqual(res, mock_filter.return_value)
+ mock_logger.debug2.assert_called_once_with("Including segment [%d-%d] from %s", 1, 3, "log1")
+
+ def test_head(self):
+ data = "line1\nline2\nline3"
+ res = utils.head(2, data)
+ self.assertEqual(res, ["line1", "line2"])
+
+ def test_tail(self):
+ data = "line1\nline2\nline3"
+ res = utils.tail(2, data)
+ self.assertEqual(res, ["line2", "line3"])
+
+ @mock.patch('crmsh.utils.get_open_method')
+ @mock.patch('builtins.open', create=True)
+ def test_write_to_file(self, mock_open, mock_method):
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with('Hello')
+
+ @mock.patch('gzip.open')
+ @mock.patch('crmsh.utils.get_open_method')
+ def test_write_to_file_encode(self, mock_method, mock_open):
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with(b'Hello')
+
+ @mock.patch('crmsh.report.utils.dt_to_str')
+ @mock.patch('crmsh.report.utils.ts_to_dt')
+ def test_ts_to_str(self, mock_ts_to_dt, mock_dt_to_str):
+ mock_ts_to_dt.return_value = datetime.datetime(2020, 2, 19, 21, 44, 7, 977355)
+ mock_dt_to_str.return_value = "2020-02-19 21:44"
+ res = utils.ts_to_str(1693519260.0)
+ self.assertEqual(res, mock_dt_to_str.return_value)
+
+ def test_ts_to_dt(self):
+ res = utils.ts_to_dt(crmutils.parse_to_timestamp("2023-09-01 06:01"))
+ self.assertEqual(utils.dt_to_str(res), "2023-09-01 06:01:00")
+
+ def test_now(self):
+ expected_res = datetime.datetime.now().strftime(constants.TIME_FORMAT)
+ res = utils.now()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.utils.now')
+ def test_create_description_template(self, mock_now, mock_isfile, mock_read, mock_str2file):
+ mock_now.return_value = "2023-09-01 06:01"
+ sys.argv = ["crm", "report", "option1"]
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/workdir")
+ mock_isfile.return_value = True
+ mock_read.return_value = "data"
+ utils.create_description_template(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.extract_critical_log')
+ @mock.patch('crmsh.report.utils.check_collected_files')
+ @mock.patch('crmsh.report.utils.compare_and_consolidate_files')
+ @mock.patch('glob.glob')
+ def test_analyze(self, mock_glob, mock_compare, mock_check_collected, mock_extract, mock_str2file):
+ mock_compare.return_value = "data"
+ mock_check_collected.return_value = ""
+ mock_extract.return_value = ""
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ utils.analyze(mock_ctx_inst)
+ mock_str2file.assert_called_once_with("data", f"/opt/work_dir/{constants.ANALYSIS_F}")
+
+ @mock.patch('crmsh.report.utils.consolidate')
+ @mock.patch('crmsh.report.utils.do_compare')
+ @mock.patch('glob.glob')
+ def test_compare_and_consolidate_files(self, mock_glob, mock_compare, mock_consolidate):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_glob.side_effect = [False, True, True, True, True]
+ mock_compare.side_effect = [(0, ""), (0, ""), (0, ""), (0, "")]
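+ # the first glob returns nothing, producing the "no ... found" line; the remaining four files all diff cleanly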
+ res = utils.compare_and_consolidate_files(mock_ctx_inst)
+ self.assertEqual(f"Diff {constants.MEMBERSHIP_F}... no {constants.MEMBERSHIP_F} found in /opt/work_dir\nDiff {constants.CRM_MON_F}... OK\nDiff {constants.COROSYNC_F}... OK\nDiff {constants.SYSINFO_F}... OK\nDiff {constants.CIB_F}... OK\n\n", res)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('crmsh.utils.file_is_empty')
+ @mock.patch('os.path.isfile')
+ def test_check_collected_files(self, mock_isfile, mock_is_empty, mock_read):
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work_dir")
+ mock_isfile.side_effect = [False, False, True]
+ mock_is_empty.return_value = False
+ mock_read.return_value = "data"
+ res = utils.check_collected_files(mock_ctx_inst)
+ self.assertEqual(res, ["Checking problems with permissions/ownership at node1:", "data"])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp_none(self, mock_parse, mock_error):
+ mock_parse.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ utils.parse_to_timestamp("xxxxx")
+ mock_error.assert_has_calls([
+ mock.call(f"Invalid time string 'xxxxx'"),
+ mock.call('Try these formats like: 2pm; "2019/9/5 12:30"; "09-Sep-07 2:00"; "[1-9][0-9]*[YmdHM]"')
+ ])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp(self, mock_parse, mock_error):
+ mock_parse.return_value = 1234567
+ res = utils.parse_to_timestamp("2023")
+ self.assertEqual(res, mock_parse.return_value)
+
+ def test_parse_to_timestamp_delta(self):
+ timedelta = datetime.timedelta(days=10)
+ expected_timestamp = (datetime.datetime.now() - timedelta).timestamp()
+ res = utils.parse_to_timestamp("10d")
+ self.assertEqual(int(res), int(expected_timestamp))
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('glob.glob')
+ def test_extract_critical_log(self, mock_glob, mock_read):
+ mock_glob.return_value = ["/opt/workdir/pacemaker.log"]
+ mock_read.return_value = """
+line1
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource
+line4
+ """
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ res = utils.extract_critical_log(mock_ctx_inst)
+ expected_data = """
+WARNINGS or ERRORS in pacemaker.log:
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource"""
+ self.assertEqual('\n'.join(res), expected_data)
+
+ def test_findln_by_timestamp_1(self):
+ pacemaker_file_path = "pacemaker.log.2"
+ with open(pacemaker_file_path) as f:
+ data = f.read()
+ data_list = data.split('\n')
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ first_timestamp = utils.get_timestamp(data_list[0], pacemaker_file_path)
+ middle_timestamp = utils.get_timestamp(data_list[1], pacemaker_file_path)
+ last_timestamp = utils.get_timestamp(data_list[2], pacemaker_file_path)
+ assert first_timestamp < middle_timestamp < last_timestamp
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 11:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 2
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 12:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 3
+
+ def test_findln_by_timestamp_irregular(self):
+ data = """line1
+ line2
+ line3
+ line4"""
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time)
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, "file1")
+ self.assertIsNone(result_line)
+
+ def test_findln_by_timestamp(self):
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ with open('pacemaker.log') as f:
+ data = f.read()
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ pacemaker_file_path = "pacemaker.log"
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_line_stamp = utils.get_timestamp(data.split('\n')[result_line-1], pacemaker_file_path)
+ assert result_line_stamp > target_time_stamp
+ result_pre_line_stamp = utils.get_timestamp(data.split('\n')[result_line-2], pacemaker_file_path)
+ assert result_pre_line_stamp < target_time_stamp
+
+ target_time = "Apr 03 11:01:19"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_time = ' '.join(data.split('\n')[result_line-1].split()[:3])
+ self.assertEqual(result_time, target_time)
+
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_not_syslog(self, mock_parse):
+ mock_parse.return_value = 123456
+ res = utils.get_timestamp_from_time_line("line1", "rfc5424", "file1")
+ self.assertEqual(res, mock_parse.return_value)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_next_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2024, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertIsNone(res)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_that_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2022, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertEqual(res, mock_parse.return_value)
diff --git a/test/unittests/test_sbd.py b/test/unittests/test_sbd.py
new file mode 100644
index 0000000..bc2b50a
--- /dev/null
+++ b/test/unittests/test_sbd.py
@@ -0,0 +1,894 @@
+import os
+import unittest
+import logging
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+
+
+class TestSBDTimeout(unittest.TestCase):
+ """
+ Unitary tests for crmsh.sbd.SBDTimeout
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ _dict = {"sbd.watchdog_timeout": 5, "sbd.msgwait": 10}
+ _inst_q = mock.Mock()
+ self.sbd_timeout_inst = sbd.SBDTimeout(mock.Mock(profiles_dict=_dict, is_s390=True, qdevice_inst=_inst_q))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_initialize_timeout(self):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout = mock.Mock()
+ self.sbd_timeout_inst._set_sbd_msgwait = mock.Mock()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice = mock.Mock()
+ self.sbd_timeout_inst.initialize_timeout()
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout.assert_called_once()
+ self.sbd_timeout_inst._set_sbd_msgwait.assert_not_called()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice.assert_called_once()
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_watchdog_timeout(self, mock_warn):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to %d for s390, it was %d", sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390, 5)
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_msgwait(self, mock_warn):
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ self.sbd_timeout_inst._set_sbd_msgwait()
+ mock_warn.assert_called_once_with("sbd msgwait is set to %d, it was %d", 30, 10)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_qdevice_sync_timeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_sbd_stage(self, mock_is_configured, mock_is_active, mock_get_sync, mock_warn):
+ mock_is_configured.return_value = True
+ mock_is_active.return_value = True
+ mock_get_sync.return_value = 15
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
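+ # expected 20 presumably comes from the qdevice sync timeout (15) plus a 5s margin, per the warning asserted below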
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 20 for qdevice, it was 5")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_all(self, mock_is_configured, mock_warn):
+ mock_is_configured.return_value = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 35 for qdevice, it was 5")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait_exception(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ self.assertEqual("Cannot get sbd msgwait for /dev/sda1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait(self, mock_run):
+ mock_run.return_value = """
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ res = sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ assert res == 10
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout_exception(self, mock_get):
+ mock_get.return_value = None
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ self.assertEqual("Cannot get the value of SBD_WATCHDOG_TIMEOUT", str(err.exception))
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout(self, mock_get):
+ mock_get.return_value = 5
+ res = sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ assert res == 5
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout_return(self, mock_active):
+ mock_active.return_value = False
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == sbd.SBDTimeout.STONITH_WATCHDOG_TIMEOUT_DEFAULT
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout(self, mock_active, mock_get_property):
+ mock_active.return_value = True
+ mock_get_property.return_value = "60s"
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == 60
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.utils.get_pcmk_delay_max')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_msgwait')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations(self, mock_2node, mock_get_sbd_dev, mock_get_msgwait, mock_pcmk_delay, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = ["/dev/sda1"]
+ mock_get_msgwait.return_value = 30
+ mock_pcmk_delay.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_msgwait.assert_called_once_with("/dev/sda1")
+ mock_pcmk_delay.assert_called_once_with(True)
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations_diskless(self, mock_2node, mock_get_sbd_dev, mock_get_watchdog_timeout, mock_get_stonith_watchdog_timeout, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = []
+ mock_get_watchdog_timeout.return_value = 30
+ mock_get_stonith_watchdog_timeout.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_watchdog_timeout.assert_called_once_with()
+ mock_get_stonith_watchdog_timeout.assert_called_once_with()
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
+ mock_general.return_value = 11
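+ # assuming stonith-timeout = 1.2 * (pcmk_delay_max + msgwait) + token/consensus: 1.2 * 60 + 11 = 83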
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 83
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected_diskless(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.stonith_watchdog_timeout = -1
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ mock_general.return_value = 11
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 71
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
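+ # pcmk_delay_max, msgwait and token/consensus are each 30s here, giving an expected delay start of 90s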
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected_diskless(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 30
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_is_sbd_delay_start(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "100"
+ assert sbd.SBDTimeout.is_sbd_delay_start() is True
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start_return(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "100"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "no"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_called_once_with({"SBD_DELAY_START": "100"})
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_no(self, mock_get_sbd_value, mock_run):
+ mock_get_sbd_value.return_value = "no"
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_not_called()
+
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_return(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp):
+ mock_get_sbd_value.return_value = "10"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_not_called()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp, mock_str2file, mock_csync2, mock_cluster_run):
+ mock_get_sbd_value.return_value = "100"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
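+ # SBD_DELAY_START=100 exceeds the current 90s TimeoutStartUSec, so a drop-in is written and synced (120 here, presumably 1.2 * the delay)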
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_str2file.assert_called_once_with('[Service]\nTimeoutSec=120', '/etc/systemd/system/sbd.service.d/sbd_delay_start.conf')
+ mock_csync2.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_cluster_run.assert_called_once_with("systemctl daemon-reload")
+
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig_yes(self, mock_get_sbd_value, mock_get_sbd_timeout):
+ mock_get_sbd_value.return_value = "yes"
+ mock_get_sbd_timeout.return_value = 30
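+ # SBD_DELAY_START="yes" presumably falls back to 2 * SBD_WATCHDOG_TIMEOUT, hence 60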
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 60
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "30"
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 30
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+
+class TestSBDManager(unittest.TestCase):
+ """
+ Unitary tests for crmsh.sbd.SBDManager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.sbd_inst = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1"], diskless_sbd=False))
+ self.sbd_inst_devices_gt_3 = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]))
+ self.sbd_inst_interactive = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=False))
+ self.sbd_inst_diskless = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=True))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_yes_to_all(self, mock_warn):
+ self.sbd_inst._context = mock.Mock(yes_to_all=True)
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_warn.assert_called_once_with(sbd.SBDManager.SBD_WARNING)
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_not_confirm(self, mock_warn, mock_status, mock_confirm):
+ self.sbd_inst._context.yes_to_all = False
+ mock_confirm.return_value = False
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_warn.assert_called_once_with("Not configuring SBD - STONITH will be disabled.")
+
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_already_configured(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_from_config.return_value = ["/dev/sda1"]
+ mock_no_overwrite.return_value = True
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sda1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ ])
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_called_once_with('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_null_and_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_called_once_with("Do you wish to use SBD?")
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+ ])
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt, mock_verify, mock_error_msg, mock_warn, mock_split):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.side_effect = [True, False, True]
+ mock_from_config.return_value = []
+ mock_no_overwrite.return_value = False
+ mock_prompt.side_effect = ["/dev/test1", "/dev/sda1", "/dev/sdb1"]
+ mock_split.side_effect = [["/dev/test1"], ["/dev/sda1"], ["/dev/sdb1"]]
+ mock_verify.side_effect = [ValueError("/dev/test1 error"), None, None]
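+ # /dev/test1 fails verification, /dev/sda1 verifies but is not confirmed, /dev/sdb1 is finally accepted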
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sdb1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ mock.call("Are you sure you wish to use this device?")
+ ])
+ mock_from_config.assert_called_once_with()
+ mock_error_msg.assert_called_once_with("/dev/test1 error")
+ mock_warn.assert_has_calls([
+ mock.call("All data on /dev/sda1 will be destroyed!"),
+ mock.call("All data on /dev/sdb1 will be destroyed!")
+ ])
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*') for x in range(3)
+ ])
+ mock_split.assert_has_calls([
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/test1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sda1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sdb1"),
+ ])
+
+ def test_verify_sbd_device_gt_3(self):
+ assert self.sbd_inst_devices_gt_3.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]
+ dev_list = self.sbd_inst_devices_gt_3.sbd_devices_input
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst_devices_gt_3._verify_sbd_device(dev_list)
+ self.assertEqual("Maximum number of SBD device is 3", str(err.exception))
+
+ @mock.patch('crmsh.sbd.SBDManager._compare_device_uuid')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_sbd_device_not_block(self, mock_block_device, mock_compare):
+ assert self.sbd_inst.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1"]
+ dev_list = self.sbd_inst.sbd_devices_input
+ mock_block_device.side_effect = [True, False]
+
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._verify_sbd_device(dev_list)
+ self.assertEqual("/dev/sdc1 doesn't look like a block device", str(err.exception))
+
+ mock_block_device.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdc1")])
+ mock_compare.assert_called_once_with("/dev/sdb1", [])
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ def test_get_sbd_device_from_option(self, mock_verify):
+ self.sbd_inst._get_sbd_device()
+ mock_verify.assert_called_once_with(['/dev/sdb1', '/dev/sdc1'])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_interactive')
+ def test_get_sbd_device_from_interactive(self, mock_interactive):
+ mock_interactive.return_value = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst_interactive._get_sbd_device()
+ mock_interactive.assert_called_once_with()
+
+ def test_get_sbd_device_diskless(self):
+ self.sbd_inst_diskless._get_sbd_device()
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd_return(self, mock_info, mock_sbd_timeout):
+ mock_inst = mock.Mock()
+ mock_sbd_timeout.return_value = mock_inst
+ self.sbd_inst_diskless._context = mock.Mock(profiles_dict={})
+ self.sbd_inst_diskless._initialize_sbd()
+ mock_info.assert_called_once_with("Configuring diskless SBD")
+ mock_inst.initialize_timeout.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd(self, mock_info, mock_sbd_timeout, mock_invoke, mock_error):
+ mock_inst = mock.Mock(sbd_msgwait=10, sbd_watchdog_timeout=5)
+ mock_sbd_timeout.return_value = mock_inst
+ mock_inst.set_sbd_watchdog_timeout = mock.Mock()
+ mock_inst.set_sbd_msgwait = mock.Mock()
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ mock_invoke.side_effect = [(True, None, None), (False, None, "error")]
+ mock_error.side_effect = ValueError
+
+ with self.assertRaises(ValueError):
+ self.sbd_inst._initialize_sbd()
+
+ mock_invoke.assert_has_calls([
+ mock.call("sbd -4 10 -1 5 -d /dev/sdb1 create"),
+ mock.call("sbd -4 10 -1 5 -d /dev/sdc1 create")
+ ])
+ mock_error.assert_called_once_with("Failed to initialize SBD device /dev/sdc1: error")
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('shutil.copyfile')
+ def test_update_configuration(self, mock_copy, mock_sysconfig, mock_update):
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst._watchdog_inst = mock.Mock(watchdog_device_name="/dev/watchdog")
+ self.sbd_inst.timeout_inst = mock.Mock(sbd_watchdog_timeout=15)
+
+ self.sbd_inst._update_sbd_configuration()
+
+ mock_copy.assert_called_once_with("/usr/share/fillup-templates/sysconfig.sbd", "/etc/sysconfig/sbd")
+ mock_sysconfig.assert_called_once_with("/etc/sysconfig/sbd", SBD_WATCHDOG_DEV='/dev/watchdog', SBD_DEVICE='/dev/sdb1;/dev/sdc1', SBD_WATCHDOG_TIMEOUT="15")
+ mock_update.assert_called_once_with("/etc/sysconfig/sbd")
+
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config_none(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = None
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == []
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config(self, mock_parse, mock_split):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/sdb1;/dev/sdc1"
+ mock_split.return_value = ["/dev/sdb1", "/dev/sdc1"]
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == ["/dev/sdb1", "/dev/sdc1"]
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+ mock_split.assert_called_once_with(sbd.SBDManager.PARSE_RE, "/dev/sdb1;/dev/sdc1")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_diskless(self, mock_vote, mock_warn):
+ self.sbd_inst_diskless._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst_diskless._warn_diskless_sbd()
+ mock_vote.assert_not_called()
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_peer(self, mock_vote, mock_warn):
+ mock_vote.return_value = {'Expected': '1'}
+ self.sbd_inst_diskless._warn_diskless_sbd("node2")
+ mock_vote.assert_called_once_with("node2")
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.sbd_init()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_return(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_invoke):
+ mock_package.return_value = True
+ self.sbd_inst._sbd_devices = None
+ self.sbd_inst.diskless_sbd = False
+ self.sbd_inst._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+
+ self.sbd_inst.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_not_called()
+ mock_update.assert_not_called()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._enable_sbd_service')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_warn, mock_enable_sbd):
+ mock_package.return_value = True
+ self.sbd_inst_diskless._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+ self.sbd_inst_diskless.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_called_once_with()
+ mock_update.assert_called_once_with()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_warn.assert_called_once_with()
+ mock_enable_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_no_ra_running(self, mock_parser, mock_status, mock_cluster_run, mock_wait, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = False
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_status.assert_called_once_with("Restarting cluster service")
+ mock_cluster_run.assert_called_once_with("crm cluster restart")
+ mock_wait.assert_called_once_with()
+ mock_config_sbd_ra.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_diskless(self, mock_parser, mock_warn, mock_get_timeout):
+ mock_parser().is_any_resource_running.return_value = True
+ mock_get_timeout.return_value = 60
+ self.sbd_inst_diskless.timeout_inst = mock.Mock(stonith_watchdog_timeout=-1)
+ self.sbd_inst_diskless._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ mock.call("Then run \"crm configure property stonith-enabled=true stonith-watchdog-timeout=-1 stonith-timeout=60\" on any node")
+ ])
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed(self, mock_parser, mock_warn, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = True
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_enable_sbd_service_init(self, mock_invoke):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst._enable_sbd_service()
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._restart_cluster_and_configure_sbd_ra')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ def test_enable_sbd_service_restart(self, mock_cluster_run, mock_restart):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._enable_sbd_service()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable sbd.service"),
+ ])
+ mock_restart.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.configure_sbd_resource_and_properties()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sbd.SBDTimeout.adjust_sbd_timeout_related_cluster_configuration')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties(
+ self,
+ mock_package, mock_enabled, mock_parser, mock_run, mock_set_property, sbd_adjust, mock_is_active,
+ ):
+ mock_package.return_value = True
+ mock_enabled.return_value = True
+ mock_parser().is_resource_configured.return_value = False
+ mock_is_active.return_value = False
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._get_sbd_device_from_config = mock.Mock()
+ self.sbd_inst._get_sbd_device_from_config.return_value = ["/dev/sda1"]
+
+ self.sbd_inst.configure_sbd_resource_and_properties()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_run.assert_called_once_with("crm configure primitive {} {}".format(sbd.SBDManager.SBD_RA_ID, sbd.SBDManager.SBD_RA))
+ mock_set_property.assert_called_once_with("stonith-enabled", "true")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_exist(self, mock_package, mock_exists, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_disabled(self, mock_package, mock_exists, mock_enabled, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = False
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_verify, mock_status):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb1"]
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sdb1"], ["node1"])
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_warn, mock_status, mock_set):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = []
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_warn.assert_called_once_with("node1")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got diskless SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod_exception(self, mock_get_config):
+ mock_get_config.return_value = []
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDManager.verify_sbd_device()
+ self.assertEqual("No sbd device configured", str(err.exception))
+ mock_get_config.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod(self, mock_get_config, mock_list_nodes, mock_verify):
+ mock_get_config.return_value = ["/dev/sda1"]
+ mock_list_nodes.return_value = ["node1"]
+ sbd.SBDManager.verify_sbd_device()
+ mock_get_config.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sda1"], ["node1"])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid_return(self, mock_get_uuid):
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", None)
+ mock_get_uuid.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid(self, mock_get_uuid):
+ mock_get_uuid.side_effect = ["1234", "5678"]
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", ["node1"])
+ self.assertEqual("Device /dev/sdb1 doesn't have the same UUID with node1", str(err.exception))
+ mock_get_uuid.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdb1", "node1")])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid_not_match(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._get_device_uuid("/dev/sdb1")
+ self.assertEqual("Cannot find sbd device UUID for /dev/sdb1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sdb1 dump", None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid(self, mock_run):
+ output = """
+ ==Dumping header on disk /dev/sda1
+ Header version : 2.1
+ UUID : a2e9a92c-cc72-4ef9-ac55-ccc342f3546b
+ Number of slots : 255
+ Sector size : 512
+ Timeout (watchdog) : 5
+ Timeout (allocate) : 2
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ mock_run.return_value = output
+ res = self.sbd_inst._get_device_uuid("/dev/sda1", node="node1")
+ self.assertEqual(res, "a2e9a92c-cc72-4ef9-ac55-ccc342f3546b")
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump", "node1")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_true(self, mock_context, mock_is_active, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = []
+ mock_is_active.return_value = True
+ assert sbd.SBDManager.is_using_diskless_sbd() is True
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+ mock_is_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_false(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.is_using_diskless_sbd() is False
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_get_sbd_device_from_config_classmethod(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.get_sbd_device_from_config() == ["/dev/sda1"]
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ def test_update_configuration_static(self, mock_config_set, mock_csync2):
+ sbd_config_dict = {
+ "SBD_PACEMAKER": "yes",
+ "SBD_STARTMODE": "always",
+ "SBD_DELAY_START": "no",
+ }
+ self.sbd_inst.update_configuration(sbd_config_dict)
+ mock_config_set.assert_called_once_with(bootstrap.SYSCONFIG_SBD, **sbd_config_dict)
+ mock_csync2.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
diff --git a/test/unittests/test_scripts.py b/test/unittests/test_scripts.py
new file mode 100644
index 0000000..04c74e2
--- /dev/null
+++ b/test/unittests/test_scripts.py
@@ -0,0 +1,914 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from builtins import str
+from builtins import object
+from os import path
+from pprint import pprint
+import pytest
+from lxml import etree
+from crmsh import scripts
+from crmsh import ra
+from crmsh import utils
+
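+# Point script discovery at the bundled fixtures under test/unittests/scripts/ so the tests below load those definitions.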
+scripts._script_dirs = lambda: [path.join(path.dirname(__file__), 'scripts')]
+
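+# OCF metadata for the apache resource agent, returned for "test:apache" by the fake ra.get_ra installed in setup_function().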
+_apache = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="apache">
+<version>1.0</version>
+
+<longdesc lang="en">
+This is the resource agent for the Apache Web server.
+This resource agent operates both version 1.x and version 2.x Apache
+servers.
+
+The start operation ends with a loop in which monitor is
+repeatedly called to make sure that the server started and that
+it is operational. Hence, if the monitor operation does not
+succeed within the start operation timeout, the apache resource
+will end with an error status.
+
+The monitor operation by default loads the server status page
+which depends on the mod_status module and the corresponding
+configuration file (usually /etc/apache2/mod_status.conf).
+Make sure that the server status page works and that the access
+is allowed *only* from localhost (address 127.0.0.1).
+See the statusurl and testregex attributes for more details.
+
+See also http://httpd.apache.org/
+</longdesc>
+<shortdesc lang="en">Manages an Apache Web server instance</shortdesc>
+
+<parameters>
+<parameter name="configfile" required="0" unique="1">
+<longdesc lang="en">
+The full pathname of the Apache configuration file.
+This file is parsed to provide defaults for various other
+resource agent parameters.
+</longdesc>
+<shortdesc lang="en">configuration file path</shortdesc>
+<content type="string" default="$(detect_default_config)" />
+</parameter>
+
+<parameter name="httpd">
+<longdesc lang="en">
+The full pathname of the httpd binary (optional).
+</longdesc>
+<shortdesc lang="en">httpd binary path</shortdesc>
+<content type="string" default="/usr/sbin/httpd" />
+</parameter>
+
+<parameter name="port" >
+<longdesc lang="en">
+A port number that we can probe for status information
+using the statusurl.
+This will default to the port number found in the
+configuration file, or 80, if none can be found
+in the configuration file.
+
+</longdesc>
+<shortdesc lang="en">httpd port</shortdesc>
+<content type="integer" />
+</parameter>
+
+<parameter name="statusurl">
+<longdesc lang="en">
+The URL to monitor (the apache server status page by default).
+If left unspecified, it will be inferred from
+the apache configuration file.
+
+If you set this, make sure that it succeeds *only* from the
+localhost (127.0.0.1). Otherwise, it may happen that the cluster
+complains about the resource being active on multiple nodes.
+</longdesc>
+<shortdesc lang="en">url name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex">
+<longdesc lang="en">
+Regular expression to match in the output of statusurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">monitor regular expression</shortdesc>
+<content type="string" default="exists, but impossible to show in a human readable format (try grep testregex)"/>
+</parameter>
+
+<parameter name="client">
+<longdesc lang="en">
+Client to use to query Apache. If not specified, the RA will
+try to find one on the system. Currently, wget and curl are
+supported. For example, you can set this parameter to "curl" if
+you prefer that to wget.
+</longdesc>
+<shortdesc lang="en">http client</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="testurl">
+<longdesc lang="en">
+URL to test. If it does not start with "http", then it's
+considered to be relative to the Listen address.
+</longdesc>
+<shortdesc lang="en">test url</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex10">
+<longdesc lang="en">
+Regular expression to match in the output of testurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">extended monitor regular expression</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testconffile">
+<longdesc lang="en">
+A file which contains test configuration. Could be useful if
+you have to check more than one web application or in case sensitive
+info should be passed as arguments (passwords). Furthermore,
+using a config file is the only way to specify certain
+parameters.
+
+Please see README.webapps for examples and file description.
+</longdesc>
+<shortdesc lang="en">test configuration file</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testname">
+<longdesc lang="en">
+Name of the test within the test configuration file.
+</longdesc>
+<shortdesc lang="en">test name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="options">
+<longdesc lang="en">
+Extra options to apply when starting apache. See man httpd(8).
+</longdesc>
+<shortdesc lang="en">command line options</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="envfiles">
+<longdesc lang="en">
+Files (one or more) which contain extra environment variables.
+If you want to prevent script from reading the default file, set
+this parameter to empty string.
+</longdesc>
+<shortdesc lang="en">environment settings files</shortdesc>
+<content type="string" default="/etc/apache2/envvars"/>
+</parameter>
+
+<parameter name="use_ipv6">
+<longdesc lang="en">
+We will try to detect if the URL (for monitor) is IPv6, but if
+that doesn't work set this to true to enforce IPv6.
+</longdesc>
+<shortdesc lang="en">use ipv6 with http clients</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="60s" />
+<action name="status" timeout="30s" />
+<action name="monitor" depth="0" timeout="20s" interval="10" />
+<action name="meta-data" timeout="5" />
+<action name="validate-all" timeout="5" />
+</actions>
+</resource-agent>
+'''
+
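+# OCF metadata for the IPaddr2 (virtual IP) agent, returned by the fake ra.get_ra for every other "test:" agent.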
+_virtual_ip = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="IPaddr2">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource manages IP alias IP addresses.
+It can add an IP alias, or remove one.
+In addition, it can implement Cluster Alias IP functionality
+if invoked as a clone resource.
+
+If used as a clone, you should explicitly set clone-node-max &gt;= 2,
+and/or clone-max &lt; number of nodes. In case of node failure,
+clone instances need to be re-allocated on surviving nodes.
+This would not be possible if there is already an instance on those nodes,
+and clone-node-max=1 (which is the default).
+</longdesc>
+
+<shortdesc lang="en">Manages virtual IPv4 and IPv6 addresses (Linux specific version)</shortdesc>
+
+<parameters>
+<parameter name="ip" unique="1" required="1">
+<longdesc lang="en">
+The IPv4 (dotted quad notation) or IPv6 address (colon hexadecimal notation)
+example IPv4 "192.168.1.1".
+example IPv6 "2001:db8:DC28:0:0:FC57:D4C8:1FFF".
+</longdesc>
+<shortdesc lang="en">IPv4 or IPv6 address</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="nic" unique="0">
+<longdesc lang="en">
+The base network interface on which the IP address will be brought
+online.
+If left empty, the script will try and determine this from the
+routing table.
+
+Do NOT specify an alias interface in the form eth0:1 or anything here;
+rather, specify the base interface only.
+If you want a label, see the iflabel parameter.
+
+Prerequisite:
+
+There must be at least one static IP address, which is not managed by
+the cluster, assigned to the network interface.
+If you can not assign any static IP address on the interface,
+modify this kernel parameter:
+
+sysctl -w net.ipv4.conf.all.promote_secondaries=1 # (or per device)
+</longdesc>
+<shortdesc lang="en">Network interface</shortdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="cidr_netmask">
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0)
+
+If unspecified, the script will also try to determine this from the
+routing table.
+</longdesc>
+<shortdesc lang="en">CIDR netmask</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="broadcast">
+<longdesc lang="en">
+Broadcast address associated with the IP. If left empty, the script will
+determine this from the netmask.
+</longdesc>
+<shortdesc lang="en">Broadcast address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="iflabel">
+<longdesc lang="en">
+You can specify an additional label for your IP address here.
+This label is appended to your interface name.
+
+The kernel allows alphanumeric labels up to a maximum length of 15
+characters including the interface name and colon (e.g. eth0:foobar1234)
+
+A label can be specified in nic parameter but it is deprecated.
+If a label is specified in nic name, this parameter has no effect.
+</longdesc>
+<shortdesc lang="en">Interface label</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="lvs_support">
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. In case an IP
+address is stopped, only move it to the loopback device to allow the
+local node to continue to service requests, but no longer advertise it
+on the network.
+
+Notes for IPv6:
+It is not necessary to enable this option on IPv6.
+Instead, enable 'lvs_ipv6_addrlabel' option for LVS-DR usage on IPv6.
+</longdesc>
+<shortdesc lang="en">Enable support for LVS DR</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_support_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel">
+<longdesc lang="en">
+Enable adding IPv6 address label so IPv6 traffic originating from
+the address's interface does not use this address as the source.
+This is necessary for LVS-DR health checks to realservers to work. Without it,
+the most recently added IPv6 address (probably the address added by IPaddr2)
+will be used as the source address for IPv6 traffic from that interface and
+since that address exists on loopback on the realservers, the realserver
+response to pings/connections will never leave its loopback.
+See RFC3484 for the detail of the source address selection.
+
+See also 'lvs_ipv6_addrlabel_value' parameter.
+</longdesc>
+<shortdesc lang="en">Enable adding IPv6 address label.</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_ipv6_addrlabel_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel_value">
+<longdesc lang="en">
+Specify IPv6 address label value used when 'lvs_ipv6_addrlabel' is enabled.
+The value should be an unused label in the policy table
+which is shown by 'ip addrlabel list' command.
+You would rarely need to change this parameter.
+</longdesc>
+<shortdesc lang="en">IPv6 address label value.</shortdesc>
+<content type="integer" default="${OCF_RESKEY_lvs_ipv6_addrlabel_value_default}"/>
+</parameter>
+
+<parameter name="mac">
+<longdesc lang="en">
+Set the interface MAC address explicitly. Currently only used in case of
+the Cluster IP Alias. Leave empty to choose automatically.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP MAC address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="clusterip_hash">
+<longdesc lang="en">
+Specify the hashing algorithm used for the Cluster IP functionality.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP hashing function</shortdesc>
+<content type="string" default="${OCF_RESKEY_clusterip_hash_default}"/>
+</parameter>
+
+<parameter name="unique_clone_address">
+<longdesc lang="en">
+If true, add the clone ID to the supplied value of IP to create
+a unique address to manage
+</longdesc>
+<shortdesc lang="en">Create a unique address for cloned instances</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_unique_clone_address_default}"/>
+</parameter>
+
+<parameter name="arp_interval">
+<longdesc lang="en">
+Specify the interval between unsolicited ARP packets in milliseconds.
+</longdesc>
+<shortdesc lang="en">ARP packet interval in ms</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
+</parameter>
+
+<parameter name="arp_count">
+<longdesc lang="en">
+Number of unsolicited ARP packets to send.
+</longdesc>
+<shortdesc lang="en">ARP packet count</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_count_default}"/>
+</parameter>
+
+<parameter name="arp_bg">
+<longdesc lang="en">
+Whether or not to send the ARP packets in the background.
+</longdesc>
+<shortdesc lang="en">ARP from background</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
+</parameter>
+
+<parameter name="arp_mac">
+<longdesc lang="en">
+MAC address to send the ARP packets to.
+
+You really shouldn't be touching this.
+
+</longdesc>
+<shortdesc lang="en">ARP MAC</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_mac_default}"/>
+</parameter>
+
+<parameter name="arp_sender">
+<longdesc lang="en">
+The program to send ARP packets with on start. For infiniband
+interfaces, default is ipoibarping. If ipoibarping is not
+available, set this to send_arp.
+</longdesc>
+<shortdesc lang="en">ARP sender</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="flush_routes">
+<longdesc lang="en">
+Flush the routing table on stop. This is for
+applications which use the cluster IP address
+and which run on the same physical host that the
+IP address lives on. The Linux kernel may force that
+application to take a shortcut to the local loopback
+interface, instead of the interface the address
+is really bound to. Under those circumstances, an
+application may, somewhat unexpectedly, continue
+to use connections for some time even after the
+IP address is deconfigured. Set this parameter in
+order to immediately disable said shortcut when the
+IP address goes away.
+</longdesc>
+<shortdesc lang="en">Flush kernel routing table on stop</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+<actions>
+<action name="start" timeout="20s" />
+<action name="stop" timeout="20s" />
+<action name="status" depth="0" timeout="20s" interval="10s" />
+<action name="monitor" depth="0" timeout="20s" interval="10s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="20s" />
+</actions>
+</resource-agent>
+'''
+
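+# Keep references to the real implementations so teardown_function() can restore them after each test.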
+_saved_get_ra = ra.get_ra
+_saved_cluster_nodes = utils.list_cluster_nodes
+
+
+def setup_function():
+ "hijack ra.get_ra to add new resource class (of sorts)"
+ class Agent(object):
+ def __init__(self, name):
+ self.name = name
+
+ def meta(self):
+ if self.name == 'apache':
+ return etree.fromstring(_apache)
+ else:
+ return etree.fromstring(_virtual_ip)
+
+ def _get_ra(agent):
+ if agent.startswith('test:'):
+ return Agent(agent[5:])
+ return _saved_get_ra(agent)
+ ra.get_ra = _get_ra
+
+ utils.list_cluster_nodes = lambda: [utils.this_node(), 'a', 'b', 'c']
+
+
+def teardown_function():
+ ra.get_ra = _saved_get_ra
+ utils.list_cluster_nodes = _saved_cluster_nodes
+
+
+def test_list():
+ assert set(['v2', 'legacy', '10-webserver', 'inc1', 'inc2', 'vip', 'vipinc', 'unified']) == set(s for s in scripts.list_scripts())
+
+
+def test_load_legacy():
+ script = scripts.load_script('legacy')
+ assert script is not None
+ assert 'legacy' == script['name']
+ assert len(script['shortdesc']) > 0
+ pprint(script)
+ actions = scripts.verify(script, {}, external_check=False)
+ pprint(actions)
+ assert [{'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Configure SSH',
+ 'text': '',
+ 'value': 'configure.py ssh'},
+ {'longdesc': '',
+ 'name': 'collect',
+ 'shortdesc': 'Check state of nodes',
+ 'text': '',
+ 'value': 'collect.py'},
+ {'longdesc': '',
+ 'name': 'validate',
+ 'shortdesc': 'Verify parameters',
+ 'text': '',
+ 'value': 'verify.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Install packages',
+ 'text': '',
+ 'value': 'configure.py install'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Generate corosync authkey',
+ 'text': '',
+ 'value': 'authkey.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Configure cluster nodes',
+ 'text': '',
+ 'value': 'configure.py corosync'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Initialize cluster',
+ 'text': '',
+ 'value': 'init.py'}] == actions
+
+
+def test_load_workflow():
+ script = scripts.load_script('10-webserver')
+ assert script is not None
+ assert '10-webserver' == script['name']
+ assert len(script['shortdesc']) > 0
+
+
+def test_v2():
+ script = scripts.load_script('v2')
+ assert script is not None
+ assert 'v2' == script['name']
+ assert len(script['shortdesc']) > 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': False}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert str(actions[0]['text']).find('group www') >= 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': True}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 3
+
+
+def test_agent_include():
+ inc2 = scripts.load_script('inc2')
+ actions = scripts.verify(
+ inc2,
+ {'wiz': 'abc',
+ 'foo': 'cde',
+ 'included-script': {'foo': True, 'bar': 'bah bah'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 6
+ assert '33\n\nabc' == actions[-1]['text'].strip()
+
+
+def test_vipinc():
+ script = scripts.load_script('vipinc')
+ assert script is not None
+ actions = scripts.verify(
+ script,
+ {'vip': {'id': 'vop', 'ip': '10.0.0.4'}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'].find('primitive vop test:virtual-ip\n\tip="10.0.0.4"') >= 0
+ assert actions[0]['text'].find("clone c-vop vop") >= 0
+
+
+def test_value_replace_handles():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ value: bar
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: test-a
+ parameters:
+ - name: foo
+ value: "{{wiz}}+{{wiz}}"
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "{{test-a:foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "SARUMAN+SARUMAN"
+
+
+def test_optional_step_ref():
+ """
+ It seems I have a bug in referencing ids from substeps.
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ required: false
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "primitive {{wiz}} {{apache:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_a,
+ {"id": "apacho"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive apacho test:apache"
+
+ #import ipdb
+ #ipdb.set_trace()
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN", "apache": {"id": "apacho"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive SARUMAN apacho"
+
+
+def test_enums_basic():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "one"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "one"
+
+ actions = scripts.verify(script_a,
+ {"foo": "three"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "three"
+
+
+def test_enums_fail():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "wrong"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_enums_fail2():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "one"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_two_substeps():
+ """
+ There is a scoping bug
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ name: apache-a
+ required: true
+ - script: apache
+ name: apache-b
+ required: true
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - include: apache-a
+ - include: apache-b
+ - cib: "primitive {{wiz}} {{apache-a:id}} {{apache-b:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_b,
+ {'wiz': "head", "apache-a": {"id": "one"}, "apache-b": {"id": "two"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive one test:apache\n\nprimitive two test:apache\n\nprimitive head one two"
+
+
+def test_required_subscript_params():
+ """
+ If an optional subscript has multiple required parameters,
+ excluding all = ok
+ excluding one = fail
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ - name: bar
+ required: true
+ type: string
+ actions:
+ - cib: "{{foo}} {{bar}}"
+'''
+
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: foofoo
+ required: false
+ actions:
+ - include: foofoo
+    - cib: "{{foofoo:foo}} {{foofoo:bar}}"
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ def ver():
+ actions = scripts.verify(script_b,
+ {"foofoo": {"foo": "one"}}, external_check=False)
+ pprint(actions)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_unified():
+ unified = scripts.load_script('unified')
+ actions = scripts.verify(
+ unified,
+ {'id': 'foo',
+ 'vip': {'id': 'bar', 'ip': '192.168.0.15'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert 'primitive bar IPaddr2 ip=192.168.0.15\ngroup g-foo foo bar' == actions[-1]['text'].strip()
+
+
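+# Test double for the script runner's printer: each callback simply records its name and arguments in self.actions.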
+class TestPrinter(object):
+ def __init__(self):
+ import types
+ self.actions = []
+
+ def add_capture(name):
+ def capture(obj, *args):
+ obj.actions.append((name, args))
+ self.__dict__[name] = types.MethodType(capture, self)
+ for name in ('print_header', 'debug', 'error', 'start', 'flush', 'print_command', 'finish'):
+ add_capture(name)
+
+def test_inline_script():
+ """
+ Test inline script feature for call actions
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ actions:
+ - call: |
+ #!/bin/sh
+ echo "{{foo}}"
+ nodes: local
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "hello world"}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert actions[0]['name'] == 'call'
+ assert actions[0]['value'] == '#!/bin/sh\necho "hello world"'
+ tp = TestPrinter()
+ scripts.run(script_a,
+ {"foo": "hello world"}, tp)
+
+ for action, args in tp.actions:
+ print(action, args)
+ if action == 'finish':
+ assert args[0]['value'] == '#!/bin/sh\necho "hello world"'
+
+
+def test_when_expression():
+ """
+ Test when expressions
+ """
+ def runtest(when, val):
+ the_script = '''version: 2.2
+shortdesc: Test when expressions
+longdesc: See if more complicated expressions work
+parameters:
+ - name: stringtest
+ type: string
+ shortdesc: A test string
+actions:
+ - call: "echo '{{stringtest}}'"
+ when: %s
+'''
+ scrpt = scripts.load_script_string('{}_{}'.format(when, val), the_script % when)
+ assert scrpt is not None
+
+ a1 = scripts.verify(scrpt,
+ {"stringtest": val},
+ external_check=False)
+ pprint(a1)
+ return a1
+
+ a1 = runtest('stringtest == "balloon"', "balloon")
+ assert len(a1) == 1 and a1[0]['value'] == "echo 'balloon'"
+
+ a1 = runtest('stringtest == "balloon"', "not a balloon")
+ assert len(a1) == 0
+
+ a1 = runtest('stringtest != "balloon"', "not a balloon")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest != "balloon"', "balloon")
+ assert len(a1) == 0
+
+ a1 = runtest('stringtest == "{{dry_run}}"', "no")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest == "yes" or stringtest == "no"', "yes")
+ assert len(a1) == 1
diff --git a/test/unittests/test_service_manager.py b/test/unittests/test_service_manager.py
new file mode 100644
index 0000000..082fc3c
--- /dev/null
+++ b/test/unittests/test_service_manager.py
@@ -0,0 +1,84 @@
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.service_manager import ServiceManager
+
+
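+# The class-level patch replaces _call_with_parallax for every test and passes the mock to each test method as mock_call_with_parallax.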
+@mock.patch("crmsh.service_manager.ServiceManager._call_with_parallax")
+class TestServiceManager(unittest.TestCase):
+ """
+ Unitary tests for class ServiceManager
+ """
+
+ def setUp(self) -> None:
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._call = mock.Mock(self.service_manager._call)
+
+ def test_call_single_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 0
+ self.assertEqual(['node1'], self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_single_node_failure(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 1
+ self.assertEqual(list(), self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_multiple_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ mock_call_with_parallax.return_value = {'node1': (0, '', ''), 'node2': (1, 'out', 'err')}
+ self.assertEqual(['node1'], self.service_manager._call(None, ['node1', 'node2'], 'foo'))
+ self.service_manager._run_on_single_host.assert_not_called()
+ mock_call_with_parallax.assert_called_once_with('foo', ['node1', 'node2'])
+
+ def test_run_on_single_host_return_1(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (1, 'bar', 'err')
+ self.assertEqual(1, self.service_manager._run_on_single_host('foo', 'node1'))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_run_on_single_host_return_255(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (255, 'bar', 'err')
+ with self.assertRaises(ValueError):
+ self.service_manager._run_on_single_host('foo', 'node1')
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_start_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl start 'service1'")
+
+ def test_start_service_on_multiple_host(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1', 'node2']
+ self.assertEqual(['node1', 'node2'], self.service_manager.start_service('service1', node_list=['node1', 'node2']))
+ self.service_manager._call.assert_called_once_with(None, ['node1', 'node2'], "systemctl start 'service1'")
+
+ def test_start_and_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', enable=True, remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable --now 'service1'")
+
+ def test_stop_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.stop_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl stop 'service1'")
+
+ def test_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.enable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable 'service1'")
+
+ def test_disable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.disable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl disable 'service1'")
diff --git a/test/unittests/test_sh.py b/test/unittests/test_sh.py
new file mode 100644
index 0000000..b3c0f0b
--- /dev/null
+++ b/test/unittests/test_sh.py
@@ -0,0 +1,189 @@
+import subprocess
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.user_of_host import UserOfHost
+
+
+class TestLocalShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.local_shell = crmsh.sh.LocalShell()
+ self.local_shell.get_effective_user_name = mock.Mock(self.local_shell.get_effective_user_name)
+ self.local_shell.geteuid = mock.Mock(self.local_shell.geteuid)
+ self.local_shell.hostname = mock.Mock(self.local_shell.hostname)
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'alice', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['su', 'alice', '--login', '-s', '/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_as_root(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_unauthorized(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'bob'
+ self.local_shell.geteuid.return_value = 1001
+ with self.assertRaises(crmsh.sh.AuthorizationError) as ctx:
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ self.assertIsInstance(ctx.exception, ValueError)
+
+ def test_get_stdout_stderr_decoded_and_stripped(self):
+ self.local_shell.get_rc_stdout_stderr_raw = mock.Mock(self.local_shell.get_rc_stdout_stderr_raw)
+ self.local_shell.get_rc_stdout_stderr_raw.return_value = 1, b' out \n', b'\terr\t\n'
+ rc, out, err = self.local_shell.get_rc_stdout_stderr('alice', 'foo', 'input')
+ self.assertEqual(1, rc)
+ self.assertEqual('out', out)
+ self.assertEqual('err', err)
+ self.local_shell.get_rc_stdout_stderr_raw.assert_called_once_with(
+ 'alice', 'foo', b'input',
+ )
+
+ def test_get_stdout_or_raise_error(self):
+ self.local_shell.su_subprocess_run = mock.Mock(self.local_shell.su_subprocess_run)
+ self.local_shell.su_subprocess_run.return_value = subprocess.CompletedProcess(
+ args=mock.Mock(),
+ returncode=1,
+ stdout=b'foo',
+ stderr=b' bar ',
+ )
+ with self.assertRaises(crmsh.sh.CommandFailure) as ctx:
+ self.local_shell.get_stdout_or_raise_error('root', 'foo')
+ self.assertIsInstance(ctx.exception, ValueError)
+
+
+class TestSSHShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.ssh_shell = crmsh.sh.SSHShell(mock.Mock(crmsh.sh.LocalShell), 'alice')
+ self.ssh_shell.local_shell.hostname.return_value = 'node1'
+ self.ssh_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.ssh_shell.local_shell.can_run_as.return_value = True
+
+ def test_can_run_as(self):
+ self.ssh_shell.local_shell.get_rc_and_error.return_value = 255, 'bar'
+ self.assertFalse(self.ssh_shell.can_run_as('node2', 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_not_called()
+
+ def test_can_run_as_local(self):
+ self.assertTrue(self.ssh_shell.can_run_as(None, 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_called_once_with('root')
+
+ def test_subprocess_run_without_input(self):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ args, kwargs = self.ssh_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar'
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+
+ @mock.patch('subprocess.run')
+ def test_subprocess_run_without_input_local(self, mock_run):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node1', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ mock_run.assert_called_once_with(
+ ['sudo', '-H', '-u', 'bob', '/bin/sh'],
+ input=b'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+
+class TestClusterShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.cluster_shell = crmsh.sh.ClusterShell(mock.Mock(crmsh.sh.LocalShell), mock.Mock(UserOfHost))
+ self.cluster_shell.local_shell.hostname.return_value = 'node1'
+ self.cluster_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.cluster_shell.local_shell.can_run_as.return_value = True
+ self.cluster_shell.user_of_host.user_pair_for_ssh.return_value = ('alice', 'bob')
+
+ def test_subprocess_run_without_input(self):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.cluster_shell.user_of_host.user_pair_for_ssh.assert_called_once_with('node2')
+ args, kwargs = self.cluster_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertIn('-u root', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar',
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
diff --git a/test/unittests/test_time.py b/test/unittests/test_time.py
new file mode 100644
index 0000000..b252a5d
--- /dev/null
+++ b/test/unittests/test_time.py
@@ -0,0 +1,24 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import utils
+from crmsh import logtime
+import time
+import datetime
+import dateutil.tz
+
+
+def test_time_convert1():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ dt = utils.parse_time('Jun 01, 2015 10:00:00')
+ assert logtime.human_date(dt) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
+
+
+def test_time_convert2():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ ts = time.localtime(utils.parse_to_timestamp('Jun 01, 2015 10:00:00'))
+ assert time.strftime('%Y-%m-%d %H:%M:%S', ts) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
diff --git a/test/unittests/test_ui_cluster.py b/test/unittests/test_ui_cluster.py
new file mode 100644
index 0000000..a86936f
--- /dev/null
+++ b/test/unittests/test_ui_cluster.py
@@ -0,0 +1,173 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import ui_cluster
+
+logging.basicConfig(level=logging.INFO)
+
+class TestCluster(unittest.TestCase):
+ """
+    Unitary tests for class ui_cluster.Cluster
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ui_cluster_inst = ui_cluster.Cluster()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_do_start_already_started(self, mock_qdevice_configured, mock_parse_nodes, mock_active, mock_info):
+ mock_qdevice_configured.return_value = False
+ context_inst = mock.Mock()
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_active.side_effect = [True, True]
+ self.ui_cluster_inst.do_start(context_inst, "node1", "node2")
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node2")
+ ])
+ mock_info.assert_has_calls([
+ mock.call("The cluster stack already started on node1"),
+ mock.call("The cluster stack already started on node2")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_vote')
+ @mock.patch('crmsh.bootstrap.start_pacemaker')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_start(self, mock_parse_nodes, mock_active, mock_start, mock_qdevice_configured, mock_info, mock_start_pacemaker, mock_check_qdevice):
+ context_inst = mock.Mock()
+ mock_start_pacemaker.return_value = ["node1"]
+ mock_parse_nodes.return_value = ["node1"]
+ mock_active.side_effect = [False, False]
+ mock_qdevice_configured.return_value = True
+
+ self.ui_cluster_inst.do_start(context_inst, "node1")
+
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("corosync-qdevice.service", remote_addr="node1")
+ ])
+ mock_start.assert_called_once_with("corosync-qdevice", node_list=["node1"])
+ mock_qdevice_configured.assert_called_once_with()
+ mock_info.assert_called_once_with("The cluster stack started on node1")
+
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop_return(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [False, False]
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_dc.assert_not_called()
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc,
+ mock_set_dlm, mock_service_manager, mock_info, mock_debug):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [True, False]
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.stop_service.side_effect = [["node1"], ["node1"], ["node1"]]
+ mock_service_manager_inst.service_is_active.return_value = True
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_debug.assert_called_once_with("stop node list: ['node1']")
+ mock_dc.assert_called_once_with("node1")
+ mock_set_dlm.assert_called_once_with("node1")
+ mock_service_manager_inst.stop_service.assert_has_calls([
+ mock.call("pacemaker", node_list=["node1"]),
+ mock.call("corosync-qdevice.service", node_list=["node1"]),
+ mock.call("corosync", node_list=["node1"]),
+ ])
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_corosync(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [False, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_pacemaker(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, True]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is True
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_info.assert_not_called()
diff --git a/test/unittests/test_upgradeuitl.py b/test/unittests/test_upgradeuitl.py
new file mode 100644
index 0000000..56c7284
--- /dev/null
+++ b/test/unittests/test_upgradeuitl.py
@@ -0,0 +1,54 @@
+import os
+import sys
+import unittest
+from unittest import mock
+
+from crmsh import upgradeutil
+
+
+class TestUpgradeCondition(unittest.TestCase):
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_force_upgrade(self, mock_stat: mock.MagicMock, mock_get_file_content):
+ mock_stat.return_value = mock.Mock(os.stat_result)
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_non_existent_seq(
+ self,
+ mock_stat: mock.MagicMock,
+ mock_get_file_content: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
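+ # The next two tests patch the module-level CURRENT_UPGRADE_SEQ with a
+ # MagicMock and stub its __gt__, so the comparison against the stored
+ # sequence can be forced to either outcome without depending on the real
+ # sequence value.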
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'0.1\n'
+ mock_current_upgrade_seq.__gt__.return_value = True
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_not_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'1.0\n'
+ mock_current_upgrade_seq.__gt__.return_value = False
+ self.assertFalse(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
diff --git a/test/unittests/test_utils.py b/test/unittests/test_utils.py
new file mode 100644
index 0000000..bf06fbd
--- /dev/null
+++ b/test/unittests/test_utils.py
@@ -0,0 +1,1514 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for utils.py
+
+import os
+import socket
+import re
+import imp
+import subprocess
+import unittest
+import pytest
+import logging
+from unittest import mock
+from itertools import chain
+
+import crmsh.utils
+from crmsh import utils, config, tmpfiles, constants, parallax
+
+logging.basicConfig(level=logging.DEBUG)
+
+def setup_function():
+ utils._ip_for_cloud = None
+ # Mock the memoize decorator with an identity function, then reload utils with imp so its functions are redefined without memoization
+ mock.patch('crmsh.utils.memoize', lambda x: x).start()
+ imp.reload(utils)
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_print_cluster_nodes(mock_run):
+ mock_run.return_value = (0, "data", None)
+ utils.print_cluster_nodes()
+ mock_run.assert_called_once_with("crm_node -l")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_package_is_installed_local(mock_run):
+ mock_run.return_value = (0, None)
+ res = utils.package_is_installed("crmsh")
+ assert res is True
+ mock_run.assert_called_once_with("rpm -q --quiet crmsh")
+
+
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included_target_not_exist(mock_detect):
+ mock_detect.side_effect = [True, False]
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is False
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included(mock_detect, mock_run):
+ mock_detect.side_effect = [True, True]
+ mock_run.side_effect = ["data data", "data"]
+
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is True
+
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+ mock_run.assert_has_calls([
+ mock.call("cat file2", host=None),
+ mock.call("cat file1", host=None)
+ ])
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None1(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (1, None)
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ res = utils.get_nodeid_from_name("node1")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_not_called()
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None2(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search.return_value = None
+ res = utils.get_nodeid_from_name("node111")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node111 ', mock_get_stdout.return_value[1], re.M)
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ mock_re_search_inst.group.return_value = '172168231'
+ res = utils.get_nodeid_from_name("node2")
+ assert res == '172168231'
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node2 ', mock_get_stdout.return_value[1], re.M)
+ mock_re_search_inst.group.assert_called_once_with(1)
+
+
+@mock.patch('crmsh.sh.LocalShell.get_rc_and_error')
+def test_check_ssh_passwd_need(mock_run):
+ mock_run.return_value = (1, 'foo')
+ res = utils.check_ssh_passwd_need("bob", "alice", "node1")
+ assert res is True
+ mock_run.assert_called_once_with(
+ "bob",
+ " ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15 -T -o Batchmode=yes alice@node1 true",
+ )
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_cluster_run_cmd_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.cluster_run_cmd("test")
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.list_cluster_nodes_except_me()
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.this_node')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me(mock_list_nodes, mock_this_node):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.list_cluster_nodes_except_me()
+ assert res == ["node2"]
+ mock_list_nodes.assert_called_once_with()
+ mock_this_node.assert_called_once_with()
+
+
+def test_to_ascii():
+ assert utils.to_ascii(None) is None
+ assert utils.to_ascii('test') == 'test'
+ assert utils.to_ascii(b'test') == 'test'
+ # Test not utf-8 characters
+ with mock.patch('traceback.print_exc') as mock_traceback:
+ assert utils.to_ascii(b'te\xe9st') == 'test'
+ mock_traceback.assert_called_once_with()
+
+
+def test_systeminfo():
+ assert utils.getuser() is not None
+ assert utils.gethomedir() is not None
+ assert utils.get_tempdir() is not None
+
+
+def test_shadowcib():
+ assert utils.get_cib_in_use() == ""
+ utils.set_cib_in_use("foo")
+ assert utils.get_cib_in_use() == "foo"
+ utils.clear_cib_in_use()
+ assert utils.get_cib_in_use() == ""
+
+
+def test_booleans():
+ truthy = ['yes', 'Yes', 'True', 'true', 'TRUE',
+ 'YES', 'on', 'On', 'ON']
+ falsy = ['no', 'false', 'off', 'OFF', 'FALSE', 'nO']
+ not_truthy = ['', 'not', 'ONN', 'TRUETH', 'yess']
+ for case in chain(truthy, falsy):
+ assert utils.verify_boolean(case) is True
+ for case in truthy:
+ assert utils.is_boolean_true(case) is True
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is True
+ for case in falsy:
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is True
+ assert utils.get_boolean(case, dflt=True) is False
+ for case in not_truthy:
+ assert utils.verify_boolean(case) is False
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is False
+
+
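+# As exercised below, utils.olist behaves as a case-insensitive list:
+# entries appear lower-cased and membership checks ignore case.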
+def test_olist():
+ lst = utils.olist(['B', 'C', 'A'])
+ lst.append('f')
+ lst.append('aA')
+ lst.append('_')
+ assert 'aa' in lst
+ assert 'a' in lst
+ assert list(lst) == ['b', 'c', 'a', 'f', 'aa', '_']
+
+
+def test_add_sudo():
+ tmpuser = config.core.user
+ try:
+ config.core.user = 'root'
+ assert utils.add_sudo('ls').startswith('sudo')
+ config.core.user = ''
+ assert utils.add_sudo('ls') == 'ls'
+ finally:
+ config.core.user = tmpuser
+
+
+def test_str2tmp():
+ txt = "This is a test string"
+ filename = utils.str2tmp(txt)
+ assert os.path.isfile(filename)
+ assert open(filename).read() == txt + "\n"
+ assert utils.file2str(filename) == txt
+ os.unlink(filename)
+
+
+@mock.patch('logging.Logger.error')
+def test_sanity(mock_error):
+ sane_paths = ['foo/bar', 'foo', '/foo/bar', 'foo0',
+ 'foo_bar', 'foo-bar', '0foo', '.foo',
+ 'foo.bar']
+ insane_paths = ['#foo', 'foo?', 'foo*', 'foo$', 'foo[bar]',
+ 'foo`', "foo'", 'foo/*']
+ for p in sane_paths:
+ assert utils.is_path_sane(p)
+ for p in insane_paths:
+ assert not utils.is_path_sane(p)
+ sane_filenames = ['foo', '0foo', '0', '.foo']
+ insane_filenames = ['foo/bar']
+ for p in sane_filenames:
+ assert utils.is_filename_sane(p)
+ for p in insane_filenames:
+ assert not utils.is_filename_sane(p)
+ sane_names = ['foo']
+ insane_names = ["f'o"]
+ for n in sane_names:
+ assert utils.is_name_sane(n)
+ for n in insane_names:
+ assert not utils.is_name_sane(n)
+
+
+def test_nvpairs2dict():
+ assert utils.nvpairs2dict(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'}
+ assert utils.nvpairs2dict(['a=b=c', 'c=d']) == {'a': 'b=c', 'c': 'd'}
+ assert utils.nvpairs2dict(['a']) == {'a': None}
+
+
+def test_validity():
+ assert utils.is_id_valid('foo0')
+ assert not utils.is_id_valid('0foo')
+
+
+def test_msec():
+ assert utils.crm_msec('1ms') == 1
+ assert utils.crm_msec('1s') == 1000
+ assert utils.crm_msec('1us') == 0
+ assert utils.crm_msec('1') == 1000
+ assert utils.crm_msec('1m') == 60*1000
+ assert utils.crm_msec('1h') == 60*60*1000
+
+
+def test_parse_sysconfig():
+ """
+ bsc#1129317: parse_sysconfig used to fail on this line
+
+ FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+ """
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ sc = utils.parse_sysconfig(fname)
+ assert ("FW_SERVICES_ACCEPT_EXT" in sc)
+
+def test_sysconfig_set():
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, FW_SERVICES_ACCEPT_EXT="foo=bar", FOO="bar")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("FW_SERVICES_ACCEPT_EXT") == "foo=bar")
+ assert (sc.get("FOO") == "bar")
+
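+# Regression test for bsc#1145823: the fixture contains "age" only as a
+# commented-out line, and sysconfig_set() must still end up writing age=100.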
+def test_sysconfig_set_bsc1145823():
+ s = '''# this is test
+#age=1000
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, age="100")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("age") == "100")
+
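+# In the two tests below, closing() is patched so that the `with closing(sock)`
+# block yields the socket mock: mock_closing.return_value.__enter__.return_value
+# is the object bound inside the with-statement.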
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_false(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = False
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 1
+
+ assert utils.check_port_open("10.10.10.1", 22) is False
+
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+ mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("10.10.10.1", 22))
+
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_true(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = True
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 0
+
+ assert utils.check_port_open("2001:db8:10::7", 22) is True
+
+ mock_is_ipv6.assert_called_once_with("2001:db8:10::7")
+ mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("2001:db8:10::7", 22))
+
+def test_valid_port():
+ assert utils.valid_port(1) is False
+ assert utils.valid_port(10000000) is False
+ assert utils.valid_port(1234) is True
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_false(mock_get_value):
+ mock_get_value.return_value = "ip"
+ assert utils.is_qdevice_configured() is False
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_true(mock_get_value):
+ mock_get_value.return_value = "net"
+ assert utils.is_qdevice_configured() is True
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_false(mock_get_value):
+ mock_get_value.return_value = "off"
+ assert utils.is_qdevice_tls_on() is False
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_true(mock_get_value):
+ mock_get_value.return_value = "on"
+ assert utils.is_qdevice_tls_on() is True
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool_return_none(mock_get_stdout):
+ mock_get_stdout.return_value = (1, None)
+ assert bool(utils.get_nodeinfo_from_cmaptool()) is False
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+
+@mock.patch("re.findall")
+@mock.patch("re.search")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool(mock_get_stdout, mock_search, mock_findall):
+ mock_get_stdout.return_value = (0, 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)\nruntime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ match_inst1 = mock.Mock()
+ match_inst2 = mock.Mock()
+ mock_search.side_effect = [match_inst1, match_inst2]
+ match_inst1.group.return_value = '1'
+ match_inst2.group.return_value = '2'
+ mock_findall.side_effect = [["192.168.43.129"], ["192.168.43.128"]]
+
+ result = utils.get_nodeinfo_from_cmaptool()
+ assert result['1'] == ["192.168.43.129"]
+ assert result['2'] == ["192.168.43.128"]
+
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+ mock_search.assert_has_calls([
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+ match_inst1.group.assert_called_once_with(1)
+ match_inst2.group.assert_called_once_with(1)
+ mock_findall.assert_has_calls([
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false_service_not_active(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = False
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_not_called()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_true(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("2") is True
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_aws() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_xen(mock_run):
+ mock_run.side_effect = ["4.2.amazon", "Xen"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_kvm(mock_run):
+ mock_run.side_effect = ["Not Specified", "Amazon EC2"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_azure() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_microsoft_corporation(mock_run, mock_request):
+ mock_run.side_effect = ["microsoft corporation", "test"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_chassis(mock_run, mock_request):
+ mock_run.side_effect = ["test", "7783-7084-3265-9085-8269-3286-77"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp_false(mock_run):
+ mock_run.return_value = "test"
+ assert utils.detect_gcp() is False
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp(mock_run, mock_request):
+ mock_run.return_value = "Google instance"
+ mock_request.return_value = "data"
+ assert utils.detect_gcp() is True
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_no_cmd(mock_is_program):
+ mock_is_program.return_value = False
+ assert utils.detect_cloud() is None
+ mock_is_program.assert_called_once_with("dmidecode")
+
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_aws(mock_is_program, mock_aws):
+ mock_is_program.return_value = True
+ mock_aws.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AWS
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_azure(mock_is_program, mock_aws, mock_azure):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AZURE
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_gcp")
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_gcp(mock_is_program, mock_aws, mock_azure, mock_gcp):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = False
+ mock_gcp.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_GCP
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+ mock_gcp.assert_called_once_with()
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_interface_choice(mock_get_stdout):
+ ip_a_output = """
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
+ link/ether 52:54:00:9e:1b:4f brd ff:ff:ff:ff:ff:ff
+ inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fe9e:1b4f/64 scope link
+ valid_lft forever preferred_lft forever
+3: br-933fa0e1438c: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
+ link/ether 9e:fe:24:df:59:49 brd ff:ff:ff:ff:ff:ff
+ inet 10.10.10.1/24 brd 10.10.10.255 scope global br-933fa0e1438c
+ valid_lft forever preferred_lft forever
+ inet6 fe80::9cfe:24ff:fedf:5949/64 scope link
+ valid_lft forever preferred_lft forever
+4: veth3fff6e9@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
+ link/ether 1e:2c:b3:73:6b:42 brd ff:ff:ff:ff:ff:ff link-netnsid 0
+ inet6 fe80::1c2c:b3ff:fe73:6b42/64 scope link
+ valid_lft forever preferred_lft forever
+ valid_lft forever preferred_lft forever
+"""
+ mock_get_stdout.return_value = (0, ip_a_output)
+ assert utils.interface_choice() == ["enp1s0", "br-933fa0e1438c", "veth3fff6e9"]
+ mock_get_stdout.assert_called_once_with("ip a")
+
+
+class TestIP(unittest.TestCase):
+ """
+ Unit tests for the utils.IP class
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ip_inst = utils.IP("10.10.10.1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('ipaddress.ip_address')
+ def test_ip_address(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock()
+ mock_ip_address.return_value = mock_ip_address_inst
+ self.ip_inst.ip_address
+ mock_ip_address.assert_called_once_with("10.10.10.1")
+
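+ # Properties are patched with new_callable=mock.PropertyMock; a bare
+ # assert_called_once_with() then verifies a single attribute access, since
+ # reading the property is what invokes the mock.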
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_version(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(version=4)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.version
+ self.assertEqual(res, mock_ip_address_inst.version)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_mcast(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_multicast=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = utils.IP.is_mcast("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.version', new_callable=mock.PropertyMock)
+ def test_is_ipv6(self, mock_version):
+ mock_version.return_value = 4
+ res = utils.IP.is_ipv6("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_version.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_loopback(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_loopback=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.is_loopback
+ self.assertEqual(res, mock_ip_address_inst.is_loopback)
+ mock_ip_address.assert_called_once_with()
+
+
+class TestInterface(unittest.TestCase):
+ """
+ Unit tests for the utils.Interface class
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interface = utils.Interface("10.10.10.123/24")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_ip_with_mask(self):
+ assert self.interface.ip_with_mask == "10.10.10.123/24"
+
+ @mock.patch('ipaddress.ip_interface')
+ def test_ip_interface(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ self.interface.ip_interface
+ mock_ip_interface.assert_called_once_with("10.10.10.123/24")
+
+ @mock.patch('crmsh.utils.Interface.ip_interface', new_callable=mock.PropertyMock)
+ def test_network(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ mock_ip_interface_inst.network = mock.Mock(network_address="10.10.10.0")
+ assert self.interface.network == "10.10.10.0"
+ mock_ip_interface.assert_called_once_with()
+
+
+class TestInterfacesInfo(unittest.TestCase):
+ """
+ Unit tests for the utils.InterfacesInfo class
+ """
+
+ network_output_error = """1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
+2: enp1s0 inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+61: tun0 inet 10.163.45.46 peer 10.163.45.45/32 scope global tun0"""
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interfaces_info = utils.InterfacesInfo()
+ self.interfaces_info_with_second_hb = utils.InterfacesInfo(second_heartbeat=True)
+ self.interfaces_info_with_custom_nic = utils.InterfacesInfo(second_heartbeat=True, custom_nic_list=['eth1'])
+ self.interfaces_info_with_wrong_nic = utils.InterfacesInfo(custom_nic_list=['eth7'])
+ self.interfaces_info_fake = utils.InterfacesInfo()
+ self.interfaces_info_fake._nic_info_dict = {
+ "eth0": [mock.Mock(ip="10.10.10.1", network="10.10.10.0"), mock.Mock(ip="10.10.10.2", network="10.10.10.0")],
+ "eth1": [mock.Mock(ip="20.20.20.1", network="20.20.20.0")]
+ }
+ self.interfaces_info_fake._default_nic_list = ["eth7"]
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_no_address(self, mock_run):
+ only_lo = "1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever"
+ mock_run.return_value = (0, only_lo, None)
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info.get_interfaces_info()
+ self.assertEqual("No address configured", str(err.exception))
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+
+ @mock.patch('crmsh.utils.Interface')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_one_addr(self, mock_run, mock_interface):
+ mock_run.return_value = (0, self.network_output_error, None)
+ mock_interface_inst_1 = mock.Mock(is_loopback=True, is_link_local=False)
+ mock_interface_inst_2 = mock.Mock(is_loopback=False, is_link_local=False)
+ mock_interface.side_effect = [mock_interface_inst_1, mock_interface_inst_2]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_second_hb.get_interfaces_info()
+ self.assertEqual("Cannot configure second heartbeat, since only one address is available", str(err.exception))
+
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+ mock_interface.assert_has_calls([
+ mock.call("127.0.0.1/8"),
+ mock.call("192.168.122.241/24")
+ ])
+
+ def test_nic_list(self):
+ res = self.interfaces_info_fake.nic_list
+ self.assertEqual(res, ["eth0", "eth1"])
+
+ def test_interface_list(self):
+ res = self.interfaces_info_fake.interface_list
+ assert len(res) == 3
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_ip_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(ip="10.10.10.1"),
+ mock.Mock(ip="10.10.10.2")
+ ]
+ res = self.interfaces_info_fake.ip_list
+ self.assertEqual(res, ["10.10.10.1", "10.10.10.2"])
+ mock_interface_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_get_local_ip_list(self, mock_get_info, mock_ip_list):
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.get_local_ip_list(False)
+ self.assertEqual(res, mock_ip_list.return_value)
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_ip_in_local(self, mock_get_info, mock_is_ipv6, mock_ip_list):
+ mock_is_ipv6.return_value = False
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.ip_in_local("10.10.10.1")
+ assert res is True
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_network_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(network="10.10.10.0"),
+ mock.Mock(network="20.20.20.0")
+ ]
+ res = self.interfaces_info.network_list
+ self.assertEqual(res, list(set(["10.10.10.0", "20.20.20.0"])))
+ mock_interface_list.assert_called_once_with()
+
+ def test_nic_first_ip(self):
+ res = self.interfaces_info_fake._nic_first_ip("eth0")
+ self.assertEqual(res, "10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route_no_default(self, mock_run, mock_get_interfaces_info, mock_warn, mock_nic_list):
+ output = """10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51"""
+ mock_run.return_value = (0, output, None)
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth0"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+ mock_warn.assert_called_once_with("No default route configured. Using the first found nic")
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route(self, mock_run):
+ output = """default via 192.168.122.1 dev eth8 proto dhcp
+ 10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51
+ 192.168.122.0/24 dev eth8 proto kernel scope link src 192.168.122.120"""
+ mock_run.return_value = (0, output, None)
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth8"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list_failed_detect(self, mock_nic_list):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_wrong_nic.get_default_ip_list()
+ self.assertEqual("Failed to detect IP address for eth7", str(err.exception))
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.utils.InterfacesInfo._nic_first_ip')
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list(self, mock_nic_list, mock_first_ip):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"], ["eth0", "eth1"]]
+ mock_first_ip.side_effect = ["10.10.10.1", "20.20.20.1"]
+
+ res = self.interfaces_info_with_custom_nic.get_default_ip_list()
+ self.assertEqual(res, ["10.10.10.1", "20.20.20.1"])
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_first_ip.assert_has_calls([mock.call("eth1"), mock.call("eth0")])
+
+
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeid(mock_get_nodeid):
+ mock_get_nodeid.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeinfo(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = {"1": ["10.10.10.1"], "2": ["10.10.10.2"]}
+ res = utils.get_iplist_from_name("test")
+ assert res == ["10.10.10.1"]
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_ping_node(mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with pytest.raises(ValueError) as err:
+ utils.ping_node("node_unreachable")
+ assert str(err.value) == 'host "node_unreachable" is unreachable: error data'
+ mock_run.assert_called_once_with("ping -c 1 node_unreachable")
+
+
+def test_calculate_quorate_status():
+ assert utils.calculate_quorate_status(3, 2) is True
+ assert utils.calculate_quorate_status(3, 1) is False
+
+
+@mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+def test_get_quorum_votes_dict(mock_run):
+ mock_run.return_value = """
+Votequorum information
+----------------------
+Expected votes: 1
+Highest expected: 1
+Total votes: 1
+Quorum: 1
+Flags: Quorate
+ """
+ res = utils.get_quorum_votes_dict()
+ assert res == {'Expected': '1', 'Total': '1'}
+ mock_run.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+def test_re_split_string():
+ assert utils.re_split_string('[; ]', "/dev/sda1; /dev/sdb1 ; ") == ["/dev/sda1", "/dev/sdb1"]
+ assert utils.re_split_string('[; ]', "/dev/sda1 ") == ["/dev/sda1"]
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_has_dev_partitioned(mock_get_dev_info):
+ mock_get_dev_info.return_value = """
+disk
+part
+ """
+ res = utils.has_dev_partitioned("/dev/sda1")
+ assert res is True
+ mock_get_dev_info.assert_called_once_with("/dev/sda1", "NAME", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_local(mock_get_dev_uuid):
+ mock_get_dev_uuid.return_value = ""
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on local"
+ mock_get_dev_uuid.assert_called_once_with("/dev/sdb1")
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_peer(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", ""]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", "5678"]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "UUID of /dev/sdb1 not same with peer node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_used_for_lvm(mock_dev_info):
+ mock_dev_info.return_value = "lvm"
+ res = utils.is_dev_used_for_lvm("/dev/sda1")
+ assert res is True
+ mock_dev_info.assert_called_once_with("/dev/sda1", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_a_plain_raw_disk_or_partition(mock_dev_info):
+ mock_dev_info.return_value = "raid1\nlvm"
+ res = utils.is_dev_a_plain_raw_disk_or_partition("/dev/md127")
+ assert res is False
+ mock_dev_info.assert_called_once_with("/dev/md127", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_dev_info(mock_run):
+ mock_run.return_value = "data"
+ res = utils.get_dev_info("/dev/sda1", "TYPE")
+ assert res == "data"
+ mock_run.assert_called_once_with("lsblk -fno TYPE /dev/sda1", None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_fs_type(mock_get_info):
+ mock_get_info.return_value = "data"
+ res = utils.get_dev_fs_type("/dev/sda1")
+ assert res == "data"
+ mock_get_info.assert_called_once_with("/dev/sda1", "FSTYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_uuid(mock_get_info):
+ mock_get_info.return_value = "uuid"
+ res = utils.get_dev_uuid("/dev/sda1")
+ assert res == "uuid"
+ mock_get_info.assert_called_once_with("/dev/sda1", "UUID", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number_except(mock_run):
+ mock_run.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.get_pe_number("vg1")
+ assert str(err.value) == "Cannot find PE on VG(vg1)"
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number(mock_run):
+ mock_run.return_value = """
+PE Size 4.00 MiB
+Total PE 1534
+Alloc PE / Size 1534 / 5.99 GiB
+ """
+ res = utils.get_pe_number("vg1")
+ assert res == 1534
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_all_vg_name(mock_run):
+ mock_run.return_value = """
+--- Volume group ---
+ VG Name ocfs2-vg
+ System ID
+ """
+ res = utils.get_all_vg_name()
+ assert res == ["ocfs2-vg"]
+ mock_run.assert_called_once_with("vgdisplay")
+
+
+@mock.patch('crmsh.utils.randomword')
+def test_gen_unused_id(mock_rand):
+ mock_rand.return_value = "1234xxxx"
+ res = utils.gen_unused_id(["test-id"], "test-id")
+ assert res == "test-id-1234xxxx"
+ mock_rand.assert_called_once_with(6)
+
+
+@mock.patch('random.choice')
+def test_randomword(mock_rand):
+ import string
+ mock_rand.side_effect = ['z', 'f', 'k', 'e', 'c', 'd']
+ res = utils.randomword()
+ assert res == "zfkecd"
+ mock_rand.assert_has_calls([mock.call(string.ascii_lowercase) for x in range(6)])
+
+
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_all_exist_id(mock_cib):
+ mock_cib.refresh = mock.Mock()
+ mock_cib.id_list = mock.Mock()
+ mock_cib.id_list.return_value = ['1', '2']
+ res = utils.all_exist_id()
+ assert res == ['1', '2']
+ mock_cib.id_list.assert_called_once_with()
+ mock_cib.refresh.assert_called_once_with()
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_mount_point_used(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_mount_point_used("/opt")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_disk_mounted(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_disk_mounted("/dev/vda2")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_stonith_running(mock_run, mock_diskless):
+ mock_run.return_value = """
+stonith-sbd
+1 fence device found
+ """
+ mock_diskless.return_value = True
+ res = utils.has_stonith_running()
+ assert res is True
+ mock_run.assert_called_once_with("stonith_admin -L")
+ mock_diskless.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device_error(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.side_effect = OSError
+ res = utils.is_block_device("/dev/sda1")
+ assert res is False
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.return_value = True
+ res = utils.is_block_device("/dev/sda1")
+ assert res is True
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.ping_node')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_check_all_nodes_reachable(mock_run, mock_ping):
+ mock_run.return_value = "1084783297 15sp2-1 member"
+ utils.check_all_nodes_reachable()
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_ping.assert_called_once_with("15sp2-1")
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_detect_virt(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.detect_virt() is True
+ mock_run.assert_called_once_with("systemd-detect-virt")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_is_standby(mock_run):
+ mock_run.return_value = """
+Node List:
+* Node 15sp2-1: standby
+ """
+ assert utils.is_standby("15sp2-1") is True
+ mock_run.assert_called_once_with("crm_mon -1")
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_dlm_option_dict(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+key1=value1
+key2=value2
+ """
+ res_dict = utils.get_dlm_option_dict()
+ assert res_dict == {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("dlm_tool dump_config", None)
+
+
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option_exception(mock_get_dict):
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ with pytest.raises(ValueError) as err:
+ utils.set_dlm_option(name="xin")
+ assert str(err.value) == '"name" is not dlm config option'
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option(mock_get_dict, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ utils.set_dlm_option(key2="test")
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with('dlm_tool set_config "key2=test"', None)
+
+
+@mock.patch('crmsh.utils.has_resource_configured')
+def test_is_dlm_configured(mock_configured):
+ mock_configured.return_value = True
+ assert utils.is_dlm_configured() is True
+ mock_configured.assert_called_once_with(constants.DLM_CONTROLD_RA, peer=None)
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate_exception(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.is_quorate()
+ assert str(err.value) == "Failed to get quorate status from corosync-quorumtool"
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+Ring ID: 1084783297/440
+Quorate: Yes
+ """
+ assert utils.is_quorate() is True
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none_no_reg(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes(no_reg=True)
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=True)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_cib_not_exist(mock_run, mock_env, mock_isfile):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = False
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+
+
+@mock.patch('crmsh.xmlutil.file2cib_elem')
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes(mock_run, mock_env, mock_isfile, mock_file2elem):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = True
+ mock_cib_inst = mock.Mock()
+ mock_file2elem.return_value = mock_cib_inst
+ mock_node_inst1 = mock.Mock()
+ mock_node_inst2 = mock.Mock()
+ mock_node_inst1.get.side_effect = ["node1", "remote"]
+ mock_node_inst2.get.side_effect = ["node2", "member"]
+ mock_cib_inst.xpath.side_effect = [[mock_node_inst1, mock_node_inst2], "data"]
+
+ res = utils.list_cluster_nodes()
+ assert res == ["node2"]
+
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_file2elem.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_cib_inst.xpath.assert_has_calls([
+ mock.call(constants.XML_NODE_PATH),
+ mock.call("//primitive[@id='node1']/instance_attributes/nvpair[@name='server']")
+ ])
+
+
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_property(mock_run, mock_env):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", "")
+ mock_env.return_value = "cib.xml"
+ assert utils.get_property("no-quorum-policy") == "data"
+ mock_run_inst.get_rc_stdout_stderr_without_input.assert_called_once_with(None, "CIB_file=cib.xml sudo --preserve-env=CIB_file crm configure get_property no-quorum-policy")
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property(mock_get, mock_run, mock_warn):
+ mock_get.return_value = "start"
+ utils.set_property("no-quorum-policy", "stop")
+ mock_run.assert_called_once_with("crm configure property no-quorum-policy=stop")
+ mock_warn.assert_called_once_with('"no-quorum-policy" in crm_config is set to stop, it was start')
+
+
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_the_same(mock_get):
+ mock_get.return_value = "value1"
+ utils.set_property("no-quorum-policy", "value1")
+ mock_get.assert_called_once_with("no-quorum-policy", "crm_config")
+
+
+@mock.patch('crmsh.utils.crm_msec')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_conditional(mock_get, mock_msec):
+ mock_get.return_value = "10s"
+ mock_msec.side_effect = ["1000", "1000"]
+ utils.set_property("timeout", "10", conditional=True)
+ mock_get.assert_called_once_with("timeout", "crm_config")
+ mock_msec.assert_has_calls([mock.call("10s"), mock.call("10")])
+
+
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm_return(mock_dlm):
+ mock_dlm.return_value = False
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.get_property')
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm(mock_dlm, mock_get_property, mock_warn):
+ mock_dlm.return_value = True
+ mock_get_property.return_value = "stop"
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+ mock_get_property.assert_called_once_with("no-quorum-policy")
+ mock_warn.assert_called_once_with('The DLM cluster best practice suggests to set the cluster property "no-quorum-policy=freeze"')
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_is_2node_cluster_without_qdevice(mock_list, mock_is_qdevice):
+ mock_list.return_value = ["node1", "node2"]
+ mock_is_qdevice.return_value = False
+ res = utils.is_2node_cluster_without_qdevice()
+ assert res is True
+ mock_list.assert_called_once_with()
+ mock_is_qdevice.assert_called_once_with()
+
+
+def test_get_systemd_timeout_start_in_sec():
+ res = utils.get_systemd_timeout_start_in_sec("1min 31s")
+ assert res == 91
+
+
+@mock.patch('crmsh.utils.is_larger_than_min_version')
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_is_ocf_1_1_cib_schema_detected(mock_cib, mock_larger):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_cib.get_schema = mock.Mock()
+ mock_cib.get_schema.return_value = "pacemaker-3.5"
+ mock_larger.return_value = True
+ assert utils.is_ocf_1_1_cib_schema_detected() is True
+ mock_cib.get_schema.assert_called_once_with()
+ mock_larger.assert_called_once_with("pacemaker-3.5", constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1(mock_support, mock_warn):
+ mock_support.return_value = False
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Master"
+ mock_support.assert_called_once_with()
+ mock_warn.assert_called_once_with('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', "Promoted", "Master", constants.CIB_UPGRADE)
+
+
+@mock.patch('logging.Logger.info')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_convert_new(mock_support, mock_info):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Master") == "Promoted"
+ mock_support.assert_called_once_with()
+ mock_info.assert_called_once_with('Convert deprecated "%s" to "%s"', "Master", "Promoted")
+
+
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_return(mock_support):
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Promoted"
+ mock_support.assert_called_once_with()
+
+
+def test_handle_role_for_ocf_1_1_return_not_role():
+ assert utils.handle_role_for_ocf_1_1("test", name='other') == "test"
+
+
+def test_compatible_role():
+ assert utils.compatible_role("Slave", "Unpromoted") is True
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_fetch_cluster_node_list_from_node(mock_run, mock_warn):
+ mock_run.return_value = """
+
+ 1 node1
+ 2 node2 lost
+ 3 node3 member
+ """
+ assert utils.fetch_cluster_node_list_from_node("node1") == ["node3"]
+ mock_run.assert_called_once_with("crm_node -l", "node1")
+ mock_warn.assert_has_calls([
+ mock.call("The node '%s' has no known name and/or state information", "1"),
+ mock.call("The node '%s'(state '%s') is not a current member", "node2", "lost")
+ ])
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+def test_cluster_copy_file_return(mock_list_nodes):
+ mock_list_nodes.return_value = []
+    assert utils.cluster_copy_file("/file1") is True
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_has_sudo_access(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.has_sudo_access() is True
+ mock_run.assert_called_once_with("sudo -S -k -n id -u")
+
+
+@mock.patch('grp.getgrgid')
+@mock.patch('os.getgroups')
+def test_in_haclient(mock_group, mock_getgrgid):
+ mock_group.return_value = [90, 100]
+ mock_getgrgid_inst1 = mock.Mock(gr_name=constants.HA_GROUP)
+ mock_getgrgid_inst2 = mock.Mock(gr_name="other")
+ mock_getgrgid.side_effect = [mock_getgrgid_inst1, mock_getgrgid_inst2]
+ assert utils.in_haclient() is True
+ mock_group.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_root(mock_user, mock_in):
+ mock_user.return_value = 'root'
+ utils.check_user_access('cluster')
+ mock_in.assert_not_called()
+
+
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_haclient(mock_user, mock_in, mock_sudo):
+ mock_user.return_value = 'user'
+ mock_in.return_value = True
+ utils.check_user_access('ra')
+ mock_sudo.assert_not_called()
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_need_sudo(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = True
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo"')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_acl(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('This command needs higher privilege.\nOption 1) Please consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'\nOption 2) Add "user" to the haclient group. For example:\n sudo usermod -g haclient user')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_cluster(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('cluster')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo".\nCurrently, this command needs to use sudo to escalate itself as root.\nPlease consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'')
diff --git a/test/unittests/test_watchdog.py b/test/unittests/test_watchdog.py
new file mode 100644
index 0000000..957f21f
--- /dev/null
+++ b/test/unittests/test_watchdog.py
@@ -0,0 +1,311 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import watchdog
+from crmsh import bootstrap
+from crmsh import constants
+
+
+class TestWatchdog(unittest.TestCase):
+ """
+    Unit tests for crmsh.watchdog.Watchdog
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.watchdog_inst = watchdog.Watchdog()
+ self.watchdog_join_inst = watchdog.Watchdog(remote_user="alice", peer_host="node1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_watchdog_device_name(self):
+ res = self.watchdog_inst.watchdog_device_name
+ assert res is None
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_ignore_error(self, mock_run):
+ mock_run.return_value = (1, None, "error")
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog", True)
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError) as err:
+ self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ mock_error.assert_called_once_with("Invalid watchdog device /dev/watchdog: error")
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.watchdog.invoke')
+ def test_load_watchdog_driver(self, mock_invoke):
+ self.watchdog_inst._load_watchdog_driver("softdog")
+ mock_invoke.assert_has_calls([
+ mock.call("echo softdog > /etc/modules-load.d/watchdog.conf"),
+ mock.call("systemctl restart systemd-modules-load")
+ ])
+
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ def test_get_watchdog_device_from_sbd_config(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/watchdog"
+ res = self.watchdog_inst._get_watchdog_device_from_sbd_config()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_parse.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_driver_is_loaded(self, mock_run):
+ output = """
+button 24576 0
+softdog 16384 2
+btrfs 1474560 1
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_inst._driver_is_loaded("softdog")
+ assert res is not None
+ mock_run.assert_called_once_with("lsmod")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ self.watchdog_inst._set_watchdog_info()
+ mock_run.assert_called_once_with(watchdog.Watchdog.QUERY_CMD)
+ mock_error.assert_called_once_with("Failed to run {}: error".format(watchdog.Watchdog.QUERY_CMD))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ self.watchdog_inst._set_watchdog_info()
+ self.assertEqual(self.watchdog_inst._watchdog_info_dict, {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'})
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver_none(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = False
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, None)
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = True
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, "/dev/watchdog1")
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ self.watchdog_join_inst._get_driver_through_device_remotely("test")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+ mock_error.assert_called_once_with("Failed to run sudo sbd query-watchdog remotely: error")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_none(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, None)
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, "softdog")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ def test_get_first_unused_device_none(self):
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, None)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_first_unused_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_verify.assert_called_once_with("/dev/watchdog", ignore_error=True)
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input_from_config(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_verify.return_value = True
+ self.watchdog_inst._set_input()
+ mock_first.assert_not_called()
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = None
+ mock_first.return_value = None
+ self.watchdog_inst._set_input()
+ self.assertEqual(self.watchdog_inst._input, "softdog")
+ mock_from_config.assert_called_once_with()
+ mock_verify.assert_not_called()
+ mock_first.assert_called_once_with()
+
+ def test_valid_device_false(self):
+ res = self.watchdog_inst._valid_device("test")
+ self.assertEqual(res, False)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_valid_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._valid_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog_error(self, mock_set_info, mock_from_config, mock_error):
+ mock_from_config.return_value = None
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.watchdog_join_inst.join_watchdog()
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_error.assert_called_once_with("Failed to get watchdog device from {}".format(bootstrap.SYSCONFIG_SBD))
+
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._get_driver_through_device_remotely')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog(self, mock_set_info, mock_from_config, mock_valid, mock_get_driver_remotely, mock_load):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_valid.return_value = False
+ mock_get_driver_remotely.return_value = "softdog"
+
+ self.watchdog_join_inst.join_watchdog()
+
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+ mock_get_driver_remotely.assert_called_once_with("/dev/watchdog")
+ mock_load.assert_called_once_with("softdog")
+
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_valid(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc):
+ mock_valid.return_value = True
+ self.watchdog_inst._input = "/dev/watchdog"
+ self.watchdog_inst.init_watchdog()
+ mock_invokerc.assert_not_called()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_error(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_error):
+ mock_valid.return_value = False
+ mock_invokerc.return_value = False
+ self.watchdog_inst._input = "test"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("test")
+ mock_invokerc.assert_called_once_with("modinfo test")
+ mock_error.assert_called_once_with("Should provide valid watchdog device or driver name by -w option")
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_device_through_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._driver_is_loaded')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_is_loaded, mock_load, mock_get_device):
+ mock_valid.return_value = False
+ self.watchdog_inst._input = "softdog"
+ mock_invokerc.return_value = True
+ mock_is_loaded.return_value = False
+ mock_get_device.return_value = "/dev/watchdog"
+
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("softdog")
+ mock_invokerc.assert_called_once_with("modinfo softdog")
+ mock_is_loaded.assert_called_once_with("softdog")
+ mock_load.assert_called_once_with("softdog")
+ mock_set_info.assert_has_calls([mock.call(), mock.call()])
+ mock_get_device.assert_called_once_with("softdog")
diff --git a/test/unittests/test_xmlutil.py b/test/unittests/test_xmlutil.py
new file mode 100644
index 0000000..48393bf
--- /dev/null
+++ b/test/unittests/test_xmlutil.py
@@ -0,0 +1,61 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import xmlutil, constants
+
+
+class TestCrmMonXmlParser(unittest.TestCase):
+ """
+    Unit tests for crmsh.xmlutil.CrmMonXmlParser
+ """
+ @mock.patch('crmsh.sh.cluster_shell')
+ def setUp(self, mock_cluster_shell):
+ """
+ Test setUp.
+ """
+ data = '''
+<data>
+ <nodes>
+ <node name="tbw-1" id="1084783148" online="true" standby="true" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="3" type="member"/>
+ <node name="tbw-2" id="1084783312" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+ </nodes>
+ <resources>
+ <resource id="ocfs2-dlm" resource_agent="ocf::pacemaker:controld" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ <resource id="ocfs2-clusterfs" resource_agent="ocf::heartbeat:Filesystem" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ </resources>
+</data>
+ '''
+ mock_cluster_shell().get_rc_stdout_stderr_without_input.return_value = (0, data, '')
+ self.parser_inst = xmlutil.CrmMonXmlParser()
+
+ def test_is_node_online(self):
+ assert self.parser_inst.is_node_online("tbw-1") is True
+ assert self.parser_inst.is_node_online("tbw-2") is False
+
+ def test_get_node_list(self):
+ assert self.parser_inst.get_node_list("standby") == ['tbw-1']
+ assert self.parser_inst.get_node_list("online") == ['tbw-2']
+
+ def test_is_resource_configured(self):
+ assert self.parser_inst.is_resource_configured("test") is False
+ assert self.parser_inst.is_resource_configured("ocf::heartbeat:Filesystem") is True
+
+ def test_is_any_resource_running(self):
+ assert self.parser_inst.is_any_resource_running() is True
+
+ def test_is_resource_started(self):
+ assert self.parser_inst.is_resource_started("test") is False
+ assert self.parser_inst.is_resource_started("ocfs2-clusterfs") is True
+ assert self.parser_inst.is_resource_started("ocf::pacemaker:controld") is True
+
+ def test_get_resource_id_list_via_type(self):
+ assert self.parser_inst.get_resource_id_list_via_type("test") == []
+ assert self.parser_inst.get_resource_id_list_via_type("ocf::pacemaker:controld")[0] == "ocfs2-dlm"