Diffstat (limited to 'cts/scheduler/xml/stop-all-resources.xml')
-rw-r--r--  cts/scheduler/xml/stop-all-resources.xml | 107
1 file changed, 107 insertions, 0 deletions
diff --git a/cts/scheduler/xml/stop-all-resources.xml b/cts/scheduler/xml/stop-all-resources.xml
new file mode 100644
index 0000000..6ecd4d6
--- /dev/null
+++ b/cts/scheduler/xml/stop-all-resources.xml
@@ -0,0 +1,107 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May 5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ <nvpair id="cib-bootstrap-options-stop-all-resources" name="stop-all-resources" value="true"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <clone id="ping-clone">
+ <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-instance_attributes">
+ <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
+ <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
+ <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
+ </instance_attributes>
+ <operations>
+ <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
+ <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+ <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </clone>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-instance_attributes">
+ <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <clone id="inactive-clone">
+ <meta_attributes id="inactive-clone-meta_attributes">
+ <nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
+ </meta_attributes>
+ <primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
+ </clone>
+ <group id="inactive-group">
+ <meta_attributes id="inactive-group-meta_attributes">
+ <nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
+ </meta_attributes>
+ <primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
+ <primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
+ </group>
+ <bundle id="httpd-bundle">
+ <docker image="pcmk:http" replicas="3"/>
+ <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
+ <port-mapping id="httpd-port" port="80"/>
+ </network>
+ <storage>
+ <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
+ <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
+ <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
+ </storage>
+ <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
+ <meta_attributes id="bundle-meta_attributes">
+ <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </bundle>
+ <group id="exim-group">
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
+ </instance_attributes>
+ </primitive>
+ <primitive id="Email" class="lsb" type="exim"/>
+ </group>
+ <clone id="mysql-clone-group">
+ <group id="mysql-group">
+ <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
+ <operations>
+ <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ </configuration>
+ <status>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>